Install Necessary Libraries¶
In [1]:
# Install required libraries
!pip install pandas numpy matplotlib seaborn plotly scikit-learn geopandas folium dash pycaret tensorflow keras xgboost pyspark lightgbm apache-spark apache-airflow apache-kafka nltk gensim shapely statsmodels mapbox scipy dask
Requirement already satisfied: pandas in d:\anaconda app\lib\site-packages (2.2.2)
Requirement already satisfied: numpy in d:\anaconda app\lib\site-packages (1.26.4)
Requirement already satisfied: matplotlib in d:\anaconda app\lib\site-packages (3.8.4)
Requirement already satisfied: seaborn in d:\anaconda app\lib\site-packages (0.13.2)
Requirement already satisfied: plotly in d:\anaconda app\lib\site-packages (5.22.0)
Requirement already satisfied: scikit-learn in d:\anaconda app\lib\site-packages (1.4.2)
Requirement already satisfied: geopandas in d:\anaconda app\lib\site-packages (1.0.1)
Requirement already satisfied: folium in d:\anaconda app\lib\site-packages (0.17.0)
Requirement already satisfied: dash in d:\anaconda app\lib\site-packages (2.18.0)
Collecting pycaret
Downloading pycaret-3.3.2-py3-none-any.whl.metadata (17 kB)
Requirement already satisfied: tensorflow in d:\anaconda app\lib\site-packages (2.17.0)
Requirement already satisfied: keras in d:\anaconda app\lib\site-packages (3.5.0)
Requirement already satisfied: xgboost in d:\anaconda app\lib\site-packages (2.1.1)
Collecting pyspark
Downloading pyspark-3.5.3.tar.gz (317.3 MB)
---------------------------------------- 0.0/317.3 MB ? eta -:--:--
---------------------------------------- 0.0/317.3 MB ? eta -:--:--
-------------------------------------- 0.0/317.3 MB 445.2 kB/s eta 0:11:53
-------------------------------------- 0.1/317.3 MB 550.5 kB/s eta 0:09:37
-------------------------------------- 0.1/317.3 MB 722.1 kB/s eta 0:07:20
-------------------------------------- 0.2/317.3 MB 958.4 kB/s eta 0:05:31
---------------------------------------- 0.3/317.3 MB 1.2 MB/s eta 0:04:19
---------------------------------------- 0.5/317.3 MB 1.6 MB/s eta 0:03:18
---------------------------------------- 0.6/317.3 MB 1.6 MB/s eta 0:03:13
---------------------------------------- 0.8/317.3 MB 2.1 MB/s eta 0:02:29
---------------------------------------- 1.0/317.3 MB 2.2 MB/s eta 0:02:23
---------------------------------------- 1.1/317.3 MB 2.3 MB/s eta 0:02:15
---------------------------------------- 1.4/317.3 MB 2.6 MB/s eta 0:02:02
---------------------------------------- 1.6/317.3 MB 2.7 MB/s eta 0:01:58
---------------------------------------- 1.7/317.3 MB 2.8 MB/s eta 0:01:54
---------------------------------------- 1.9/317.3 MB 2.9 MB/s eta 0:01:51
---------------------------------------- 2.1/317.3 MB 2.9 MB/s eta 0:01:48
---------------------------------------- 2.4/317.3 MB 3.1 MB/s eta 0:01:41
---------------------------------------- 2.7/317.3 MB 3.3 MB/s eta 0:01:35
---------------------------------------- 2.9/317.3 MB 3.4 MB/s eta 0:01:34
---------------------------------------- 3.2/317.3 MB 3.5 MB/s eta 0:01:30
---------------------------------------- 3.5/317.3 MB 3.6 MB/s eta 0:01:29
---------------------------------------- 3.5/317.3 MB 3.6 MB/s eta 0:01:28
---------------------------------------- 3.8/317.3 MB 3.7 MB/s eta 0:01:26
--------------------------------------- 4.0/317.3 MB 3.6 MB/s eta 0:01:26
--------------------------------------- 4.1/317.3 MB 3.5 MB/s eta 0:01:29
--------------------------------------- 4.4/317.3 MB 3.7 MB/s eta 0:01:26
--------------------------------------- 4.7/317.3 MB 3.8 MB/s eta 0:01:23
--------------------------------------- 4.7/317.3 MB 3.8 MB/s eta 0:01:23
--------------------------------------- 5.0/317.3 MB 3.7 MB/s eta 0:01:25
--------------------------------------- 5.2/317.3 MB 3.8 MB/s eta 0:01:23
--------------------------------------- 5.4/317.3 MB 3.7 MB/s eta 0:01:24
--------------------------------------- 5.6/317.3 MB 3.8 MB/s eta 0:01:23
--------------------------------------- 5.8/317.3 MB 3.8 MB/s eta 0:01:22
--------------------------------------- 6.0/317.3 MB 3.8 MB/s eta 0:01:23
--------------------------------------- 6.2/317.3 MB 3.8 MB/s eta 0:01:22
--------------------------------------- 6.4/317.3 MB 3.8 MB/s eta 0:01:22
--------------------------------------- 6.7/317.3 MB 3.9 MB/s eta 0:01:20
--------------------------------------- 6.9/317.3 MB 3.9 MB/s eta 0:01:19
--------------------------------------- 7.1/317.3 MB 3.9 MB/s eta 0:01:19
--------------------------------------- 7.5/317.3 MB 4.0 MB/s eta 0:01:17
--------------------------------------- 7.6/317.3 MB 4.1 MB/s eta 0:01:17
--------------------------------------- 7.9/317.3 MB 4.1 MB/s eta 0:01:16
- -------------------------------------- 8.3/317.3 MB 4.2 MB/s eta 0:01:15
- -------------------------------------- 8.3/317.3 MB 4.2 MB/s eta 0:01:15
- -------------------------------------- 8.7/317.3 MB 4.2 MB/s eta 0:01:14
- -------------------------------------- 8.9/317.3 MB 4.2 MB/s eta 0:01:14
- -------------------------------------- 9.0/317.3 MB 4.1 MB/s eta 0:01:15
- -------------------------------------- 9.4/317.3 MB 4.2 MB/s eta 0:01:14
- -------------------------------------- 9.7/317.3 MB 4.3 MB/s eta 0:01:13
- -------------------------------------- 9.7/317.3 MB 4.2 MB/s eta 0:01:13
- ------------------------------------- 10.0/317.3 MB 4.2 MB/s eta 0:01:13
- ------------------------------------- 10.3/317.3 MB 4.4 MB/s eta 0:01:11
- ------------------------------------- 10.4/317.3 MB 4.5 MB/s eta 0:01:09
- ------------------------------------- 10.7/317.3 MB 4.6 MB/s eta 0:01:07
- ------------------------------------- 10.9/317.3 MB 4.7 MB/s eta 0:01:05
- ------------------------------------- 11.1/317.3 MB 4.6 MB/s eta 0:01:06
- ------------------------------------- 11.2/317.3 MB 4.7 MB/s eta 0:01:05
- ------------------------------------- 11.4/317.3 MB 4.6 MB/s eta 0:01:06
- ------------------------------------- 11.5/317.3 MB 4.6 MB/s eta 0:01:07
- ------------------------------------- 11.7/317.3 MB 4.6 MB/s eta 0:01:07
- ------------------------------------- 11.9/317.3 MB 4.6 MB/s eta 0:01:07
- ------------------------------------- 12.1/317.3 MB 4.6 MB/s eta 0:01:07
- ------------------------------------- 12.4/317.3 MB 4.6 MB/s eta 0:01:06
- ------------------------------------- 12.6/317.3 MB 4.7 MB/s eta 0:01:06
- ------------------------------------- 12.9/317.3 MB 4.6 MB/s eta 0:01:07
- ------------------------------------- 13.2/317.3 MB 4.7 MB/s eta 0:01:06
- ------------------------------------- 13.4/317.3 MB 4.6 MB/s eta 0:01:06
- ------------------------------------- 13.7/317.3 MB 4.6 MB/s eta 0:01:06
- ------------------------------------- 14.0/317.3 MB 4.7 MB/s eta 0:01:04
- ------------------------------------- 14.1/317.3 MB 4.7 MB/s eta 0:01:05
- ------------------------------------- 14.3/317.3 MB 4.7 MB/s eta 0:01:04
- ------------------------------------- 14.6/317.3 MB 4.8 MB/s eta 0:01:04
- ------------------------------------- 14.8/317.3 MB 4.8 MB/s eta 0:01:03
- ------------------------------------- 15.0/317.3 MB 4.8 MB/s eta 0:01:03
- ------------------------------------- 15.3/317.3 MB 4.8 MB/s eta 0:01:03
- ------------------------------------- 15.5/317.3 MB 4.8 MB/s eta 0:01:04
- ------------------------------------- 15.7/317.3 MB 4.8 MB/s eta 0:01:03
- ------------------------------------- 16.0/317.3 MB 4.8 MB/s eta 0:01:03
- ------------------------------------- 16.0/317.3 MB 4.8 MB/s eta 0:01:03
- ------------------------------------- 16.3/317.3 MB 4.8 MB/s eta 0:01:04
-- ------------------------------------ 16.5/317.3 MB 4.8 MB/s eta 0:01:03
-- ------------------------------------ 16.5/317.3 MB 4.8 MB/s eta 0:01:03
-- ------------------------------------ 16.8/317.3 MB 4.7 MB/s eta 0:01:04
-- ------------------------------------ 17.1/317.3 MB 4.8 MB/s eta 0:01:03
-- ------------------------------------ 17.3/317.3 MB 4.8 MB/s eta 0:01:03
-- ------------------------------------ 17.4/317.3 MB 4.6 MB/s eta 0:01:05
-- ------------------------------------ 17.7/317.3 MB 4.7 MB/s eta 0:01:05
-- ------------------------------------ 17.9/317.3 MB 4.7 MB/s eta 0:01:05
-- ------------------------------------ 18.0/317.3 MB 4.6 MB/s eta 0:01:05
-- ------------------------------------ 18.3/317.3 MB 4.6 MB/s eta 0:01:05
-- ------------------------------------ 18.6/317.3 MB 4.7 MB/s eta 0:01:04
-- ------------------------------------ 18.7/317.3 MB 4.7 MB/s eta 0:01:04
-- ------------------------------------ 19.0/317.3 MB 4.6 MB/s eta 0:01:06
-- ------------------------------------ 19.3/317.3 MB 4.7 MB/s eta 0:01:04
-- ------------------------------------ 19.4/317.3 MB 4.6 MB/s eta 0:01:05
-- ------------------------------------ 19.7/317.3 MB 4.6 MB/s eta 0:01:05
-- ------------------------------------ 20.0/317.3 MB 4.6 MB/s eta 0:01:05
-- ------------------------------------ 20.2/317.3 MB 4.6 MB/s eta 0:01:04
-- ------------------------------------ 20.3/317.3 MB 4.5 MB/s eta 0:01:06
-- ------------------------------------ 20.7/317.3 MB 4.6 MB/s eta 0:01:05
-- ------------------------------------ 20.9/317.3 MB 4.7 MB/s eta 0:01:04
-- ------------------------------------ 21.1/317.3 MB 4.5 MB/s eta 0:01:06
-- ------------------------------------ 21.4/317.3 MB 4.7 MB/s eta 0:01:04
-- ------------------------------------ 21.7/317.3 MB 4.8 MB/s eta 0:01:02
-- ------------------------------------ 21.7/317.3 MB 4.7 MB/s eta 0:01:04
-- ------------------------------------ 21.9/317.3 MB 4.7 MB/s eta 0:01:04
-- ------------------------------------ 21.9/317.3 MB 4.7 MB/s eta 0:01:04
-- ------------------------------------ 21.9/317.3 MB 4.7 MB/s eta 0:01:04
-- ------------------------------------ 22.0/317.3 MB 4.5 MB/s eta 0:01:07
-- ------------------------------------ 22.0/317.3 MB 4.5 MB/s eta 0:01:07
-- ------------------------------------ 22.0/317.3 MB 4.5 MB/s eta 0:01:07
-- ------------------------------------ 22.0/317.3 MB 4.5 MB/s eta 0:01:07
-- ------------------------------------ 22.0/317.3 MB 4.5 MB/s eta 0:01:07
-- ------------------------------------ 22.0/317.3 MB 4.5 MB/s eta 0:01:07
-- ------------------------------------ 22.0/317.3 MB 4.5 MB/s eta 0:01:07
-- ------------------------------------ 23.2/317.3 MB 4.2 MB/s eta 0:01:10
-- ------------------------------------ 23.4/317.3 MB 4.3 MB/s eta 0:01:10
-- ------------------------------------ 23.4/317.3 MB 4.3 MB/s eta 0:01:10
-- ------------------------------------ 23.4/317.3 MB 4.3 MB/s eta 0:01:10
-- ------------------------------------ 23.5/317.3 MB 4.0 MB/s eta 0:01:14
-- ------------------------------------ 24.3/317.3 MB 4.2 MB/s eta 0:01:11
-- ------------------------------------ 24.3/317.3 MB 4.2 MB/s eta 0:01:11
-- ------------------------------------ 24.4/317.3 MB 4.1 MB/s eta 0:01:12
--- ----------------------------------- 24.6/317.3 MB 4.0 MB/s eta 0:01:13
--- ----------------------------------- 24.9/317.3 MB 4.0 MB/s eta 0:01:13
--- ----------------------------------- 25.1/317.3 MB 4.1 MB/s eta 0:01:12
--- ----------------------------------- 25.3/317.3 MB 4.0 MB/s eta 0:01:13
--- ----------------------------------- 25.6/317.3 MB 4.0 MB/s eta 0:01:13
--- ----------------------------------- 26.0/317.3 MB 4.1 MB/s eta 0:01:11
--- ----------------------------------- 26.2/317.3 MB 4.1 MB/s eta 0:01:11
--- ----------------------------------- 26.5/317.3 MB 4.2 MB/s eta 0:01:10
--- ----------------------------------- 26.6/317.3 MB 4.1 MB/s eta 0:01:11
--- ----------------------------------- 27.0/317.3 MB 4.2 MB/s eta 0:01:09
--- ----------------------------------- 27.3/317.3 MB 4.3 MB/s eta 0:01:08
--- ----------------------------------- 27.5/317.3 MB 4.2 MB/s eta 0:01:10
--- ----------------------------------- 27.9/317.3 MB 4.3 MB/s eta 0:01:09
--- ----------------------------------- 28.2/317.3 MB 4.3 MB/s eta 0:01:07
--- ----------------------------------- 28.4/317.3 MB 4.4 MB/s eta 0:01:07
--- ----------------------------------- 28.6/317.3 MB 4.3 MB/s eta 0:01:08
--- ----------------------------------- 29.0/317.3 MB 4.3 MB/s eta 0:01:07
--- ----------------------------------- 29.3/317.3 MB 4.4 MB/s eta 0:01:06
--- ----------------------------------- 29.3/317.3 MB 4.4 MB/s eta 0:01:06
--- ----------------------------------- 29.5/317.3 MB 4.3 MB/s eta 0:01:07
--- ----------------------------------- 29.6/317.3 MB 4.3 MB/s eta 0:01:08
--- ----------------------------------- 29.9/317.3 MB 4.3 MB/s eta 0:01:07
--- ----------------------------------- 30.2/317.3 MB 4.3 MB/s eta 0:01:07
--- ----------------------------------- 30.4/317.3 MB 4.3 MB/s eta 0:01:08
--- ----------------------------------- 30.6/317.3 MB 4.4 MB/s eta 0:01:06
--- ----------------------------------- 30.7/317.3 MB 4.3 MB/s eta 0:01:07
--- ----------------------------------- 31.0/317.3 MB 4.3 MB/s eta 0:01:07
--- ----------------------------------- 31.3/317.3 MB 4.4 MB/s eta 0:01:06
--- ----------------------------------- 31.6/317.3 MB 4.3 MB/s eta 0:01:06
--- ----------------------------------- 31.8/317.3 MB 4.3 MB/s eta 0:01:06
--- ----------------------------------- 32.1/317.3 MB 4.7 MB/s eta 0:01:01
--- ----------------------------------- 32.5/317.3 MB 5.5 MB/s eta 0:00:52
---- ---------------------------------- 32.6/317.3 MB 5.4 MB/s eta 0:00:53
---- ---------------------------------- 32.6/317.3 MB 5.2 MB/s eta 0:00:55
---- ---------------------------------- 32.9/317.3 MB 5.2 MB/s eta 0:00:56
---- ---------------------------------- 33.2/317.3 MB 5.0 MB/s eta 0:00:57
---- ---------------------------------- 33.5/317.3 MB 5.0 MB/s eta 0:00:57
---- ---------------------------------- 33.5/317.3 MB 4.8 MB/s eta 0:00:59
---- ---------------------------------- 33.6/317.3 MB 4.7 MB/s eta 0:01:00
---- ---------------------------------- 33.9/317.3 MB 5.1 MB/s eta 0:00:56
---- ---------------------------------- 34.2/317.3 MB 5.0 MB/s eta 0:00:57
---- ---------------------------------- 34.5/317.3 MB 5.1 MB/s eta 0:00:56
---- ---------------------------------- 34.8/317.3 MB 5.2 MB/s eta 0:00:54
---- ---------------------------------- 35.4/317.3 MB 5.2 MB/s eta 0:00:54
---- ---------------------------------- 35.7/317.3 MB 5.3 MB/s eta 0:00:53
---- ---------------------------------- 35.8/317.3 MB 5.2 MB/s eta 0:00:54
---- ---------------------------------- 36.1/317.3 MB 5.2 MB/s eta 0:00:54
---- ---------------------------------- 36.4/317.3 MB 5.2 MB/s eta 0:00:54
---- ---------------------------------- 36.6/317.3 MB 5.3 MB/s eta 0:00:54
---- ---------------------------------- 36.9/317.3 MB 5.3 MB/s eta 0:00:53
---- ---------------------------------- 37.2/317.3 MB 5.3 MB/s eta 0:00:53
---- ---------------------------------- 37.4/317.3 MB 5.3 MB/s eta 0:00:54
---- ---------------------------------- 37.6/317.3 MB 5.3 MB/s eta 0:00:53
---- ---------------------------------- 37.8/317.3 MB 5.3 MB/s eta 0:00:53
---- ---------------------------------- 38.1/317.3 MB 5.3 MB/s eta 0:00:53
---- ---------------------------------- 38.4/317.3 MB 5.3 MB/s eta 0:00:53
---- ---------------------------------- 38.7/317.3 MB 5.3 MB/s eta 0:00:53
---- ---------------------------------- 38.9/317.3 MB 5.3 MB/s eta 0:00:53
---- ---------------------------------- 39.0/317.3 MB 5.2 MB/s eta 0:00:54
---- ---------------------------------- 39.3/317.3 MB 5.2 MB/s eta 0:00:54
---- ---------------------------------- 39.6/317.3 MB 5.4 MB/s eta 0:00:52
---- ---------------------------------- 40.0/317.3 MB 5.5 MB/s eta 0:00:51
---- ---------------------------------- 40.2/317.3 MB 5.5 MB/s eta 0:00:51
---- ---------------------------------- 40.4/317.3 MB 5.4 MB/s eta 0:00:52
----- --------------------------------- 40.7/317.3 MB 5.5 MB/s eta 0:00:51
----- --------------------------------- 41.0/317.3 MB 5.6 MB/s eta 0:00:50
----- --------------------------------- 41.3/317.3 MB 5.6 MB/s eta 0:00:50
----- --------------------------------- 41.6/317.3 MB 5.6 MB/s eta 0:00:50
----- --------------------------------- 42.0/317.3 MB 5.6 MB/s eta 0:00:49
----- --------------------------------- 42.3/317.3 MB 5.6 MB/s eta 0:00:49
----- --------------------------------- 42.6/317.3 MB 5.6 MB/s eta 0:00:49
----- --------------------------------- 42.9/317.3 MB 5.9 MB/s eta 0:00:47
----- --------------------------------- 43.1/317.3 MB 5.8 MB/s eta 0:00:47
----- --------------------------------- 43.4/317.3 MB 5.8 MB/s eta 0:00:48
----- --------------------------------- 43.4/317.3 MB 5.7 MB/s eta 0:00:49
----- --------------------------------- 43.8/317.3 MB 5.9 MB/s eta 0:00:47
----- --------------------------------- 44.1/317.3 MB 6.0 MB/s eta 0:00:46
----- --------------------------------- 44.4/317.3 MB 6.0 MB/s eta 0:00:46
----- --------------------------------- 44.7/317.3 MB 6.0 MB/s eta 0:00:46
----- --------------------------------- 45.1/317.3 MB 6.1 MB/s eta 0:00:45
----- --------------------------------- 45.4/317.3 MB 6.0 MB/s eta 0:00:46
----- --------------------------------- 45.7/317.3 MB 6.1 MB/s eta 0:00:45
----- --------------------------------- 46.0/317.3 MB 6.1 MB/s eta 0:00:45
----- --------------------------------- 46.3/317.3 MB 6.1 MB/s eta 0:00:45
----- --------------------------------- 46.6/317.3 MB 6.1 MB/s eta 0:00:45
----- --------------------------------- 46.9/317.3 MB 6.1 MB/s eta 0:00:45
----- --------------------------------- 47.2/317.3 MB 6.1 MB/s eta 0:00:45
----- --------------------------------- 47.4/317.3 MB 6.1 MB/s eta 0:00:45
----- --------------------------------- 47.6/317.3 MB 6.1 MB/s eta 0:00:45
----- --------------------------------- 47.8/317.3 MB 6.0 MB/s eta 0:00:45
----- --------------------------------- 48.1/317.3 MB 6.1 MB/s eta 0:00:45
----- --------------------------------- 48.3/317.3 MB 6.1 MB/s eta 0:00:44
----- --------------------------------- 48.6/317.3 MB 6.0 MB/s eta 0:00:45
----- --------------------------------- 48.7/317.3 MB 6.1 MB/s eta 0:00:45
----- --------------------------------- 48.7/317.3 MB 6.1 MB/s eta 0:00:45
----- --------------------------------- 48.7/317.3 MB 6.1 MB/s eta 0:00:45
------ -------------------------------- 49.1/317.3 MB 5.7 MB/s eta 0:00:48
------ -------------------------------- 49.6/317.3 MB 5.8 MB/s eta 0:00:46
------ -------------------------------- 49.9/317.3 MB 5.8 MB/s eta 0:00:46
------ -------------------------------- 50.0/317.3 MB 5.7 MB/s eta 0:00:47
------ -------------------------------- 50.2/317.3 MB 5.6 MB/s eta 0:00:48
------ -------------------------------- 50.4/317.3 MB 5.6 MB/s eta 0:00:48
------ -------------------------------- 50.7/317.3 MB 5.7 MB/s eta 0:00:47
------ -------------------------------- 50.9/317.3 MB 5.6 MB/s eta 0:00:48
------ -------------------------------- 51.1/317.3 MB 5.6 MB/s eta 0:00:48
------ -------------------------------- 51.1/317.3 MB 5.6 MB/s eta 0:00:48
------ -------------------------------- 51.1/317.3 MB 5.3 MB/s eta 0:00:51
------ -------------------------------- 51.4/317.3 MB 5.3 MB/s eta 0:00:51
------ -------------------------------- 51.5/317.3 MB 5.3 MB/s eta 0:00:51
------ -------------------------------- 51.7/317.3 MB 5.2 MB/s eta 0:00:52
------ -------------------------------- 52.0/317.3 MB 5.2 MB/s eta 0:00:52
------ -------------------------------- 52.1/317.3 MB 5.1 MB/s eta 0:00:53
------ -------------------------------- 52.2/317.3 MB 5.0 MB/s eta 0:00:53
------ -------------------------------- 52.5/317.3 MB 5.0 MB/s eta 0:00:53
------ -------------------------------- 52.7/317.3 MB 5.0 MB/s eta 0:00:53
------ -------------------------------- 52.7/317.3 MB 5.0 MB/s eta 0:00:53
------ -------------------------------- 52.7/317.3 MB 5.0 MB/s eta 0:00:53
------ -------------------------------- 52.7/317.3 MB 5.0 MB/s eta 0:00:53
------ -------------------------------- 52.8/317.3 MB 4.6 MB/s eta 0:00:58
------ -------------------------------- 53.1/317.3 MB 4.6 MB/s eta 0:00:58
------ -------------------------------- 53.2/317.3 MB 4.5 MB/s eta 0:00:59
------ -------------------------------- 53.3/317.3 MB 4.5 MB/s eta 0:00:59
------ -------------------------------- 53.6/317.3 MB 4.5 MB/s eta 0:00:58
------ -------------------------------- 53.7/317.3 MB 4.6 MB/s eta 0:00:58
------ -------------------------------- 53.7/317.3 MB 4.5 MB/s eta 0:01:00
------ -------------------------------- 53.9/317.3 MB 4.4 MB/s eta 0:01:00
------ -------------------------------- 54.3/317.3 MB 4.4 MB/s eta 0:01:01
------ -------------------------------- 54.4/317.3 MB 4.4 MB/s eta 0:01:01
------ -------------------------------- 54.7/317.3 MB 4.4 MB/s eta 0:01:01
------ -------------------------------- 54.7/317.3 MB 4.3 MB/s eta 0:01:01
------ -------------------------------- 54.8/317.3 MB 4.2 MB/s eta 0:01:03
------ -------------------------------- 55.2/317.3 MB 4.2 MB/s eta 0:01:03
------ -------------------------------- 55.3/317.3 MB 4.2 MB/s eta 0:01:03
------ -------------------------------- 55.6/317.3 MB 4.1 MB/s eta 0:01:04
------ -------------------------------- 55.8/317.3 MB 4.1 MB/s eta 0:01:04
------ -------------------------------- 56.0/317.3 MB 4.1 MB/s eta 0:01:05
------ -------------------------------- 56.0/317.3 MB 4.1 MB/s eta 0:01:05
------ -------------------------------- 56.4/317.3 MB 4.0 MB/s eta 0:01:05
------ -------------------------------- 56.6/317.3 MB 4.0 MB/s eta 0:01:05
------ -------------------------------- 56.7/317.3 MB 3.9 MB/s eta 0:01:07
------ -------------------------------- 56.9/317.3 MB 3.9 MB/s eta 0:01:07
------- ------------------------------- 57.2/317.3 MB 3.9 MB/s eta 0:01:07
------- ------------------------------- 57.3/317.3 MB 3.9 MB/s eta 0:01:06
------- ------------------------------- 57.5/317.3 MB 3.9 MB/s eta 0:01:08
------- ------------------------------- 57.8/317.3 MB 3.9 MB/s eta 0:01:07
------- ------------------------------- 57.9/317.3 MB 3.9 MB/s eta 0:01:07
------- ------------------------------- 58.2/317.3 MB 3.9 MB/s eta 0:01:08
------- ------------------------------- 58.4/317.3 MB 3.8 MB/s eta 0:01:08
------- ------------------------------- 58.6/317.3 MB 3.8 MB/s eta 0:01:08
------- ------------------------------- 58.7/317.3 MB 3.8 MB/s eta 0:01:09
------- ------------------------------- 59.0/317.3 MB 4.0 MB/s eta 0:01:05
------- ------------------------------- 59.2/317.3 MB 4.0 MB/s eta 0:01:05
------- ------------------------------- 59.2/317.3 MB 4.0 MB/s eta 0:01:05
------- ------------------------------- 59.4/317.3 MB 3.8 MB/s eta 0:01:08
------- ------------------------------- 59.6/317.3 MB 3.7 MB/s eta 0:01:09
------- ------------------------------- 59.6/317.3 MB 3.7 MB/s eta 0:01:09
------- ------------------------------- 59.8/317.3 MB 3.7 MB/s eta 0:01:11
------- ------------------------------- 60.1/317.3 MB 3.7 MB/s eta 0:01:11
------- ------------------------------- 60.3/317.3 MB 3.7 MB/s eta 0:01:10
------- ------------------------------- 60.5/317.3 MB 3.7 MB/s eta 0:01:10
------- ------------------------------- 60.7/317.3 MB 3.7 MB/s eta 0:01:10
------- ------------------------------- 60.9/317.3 MB 3.6 MB/s eta 0:01:11
------- ------------------------------- 61.2/317.3 MB 3.7 MB/s eta 0:01:10
------- ------------------------------- 61.2/317.3 MB 3.6 MB/s eta 0:01:11
------- ------------------------------- 61.2/317.3 MB 3.6 MB/s eta 0:01:12
------- ------------------------------- 61.5/317.3 MB 3.7 MB/s eta 0:01:10
------- ------------------------------- 61.7/317.3 MB 3.7 MB/s eta 0:01:10
------- ------------------------------- 62.0/317.3 MB 3.7 MB/s eta 0:01:09
------- ------------------------------- 62.2/317.3 MB 3.7 MB/s eta 0:01:09
------- ------------------------------- 62.5/317.3 MB 3.8 MB/s eta 0:01:08
------- ------------------------------- 62.8/317.3 MB 3.8 MB/s eta 0:01:08
------- ------------------------------- 63.0/317.3 MB 4.1 MB/s eta 0:01:03
------- ------------------------------- 63.4/317.3 MB 4.1 MB/s eta 0:01:03
------- ------------------------------- 63.7/317.3 MB 4.1 MB/s eta 0:01:02
------- ------------------------------- 64.0/317.3 MB 4.1 MB/s eta 0:01:02
------- ------------------------------- 64.3/317.3 MB 4.3 MB/s eta 0:01:00
------- ------------------------------- 64.5/317.3 MB 4.3 MB/s eta 0:01:00
------- ------------------------------- 64.8/317.3 MB 4.3 MB/s eta 0:01:00
-------- ------------------------------ 65.1/317.3 MB 4.5 MB/s eta 0:00:57
-------- ------------------------------ 65.5/317.3 MB 4.5 MB/s eta 0:00:57
-------- ------------------------------ 65.7/317.3 MB 4.5 MB/s eta 0:00:56
-------- ------------------------------ 65.9/317.3 MB 4.5 MB/s eta 0:00:56
-------- ------------------------------ 66.0/317.3 MB 4.5 MB/s eta 0:00:57
-------- ------------------------------ 66.4/317.3 MB 4.6 MB/s eta 0:00:55
-------- ------------------------------ 66.8/317.3 MB 4.6 MB/s eta 0:00:55
-------- ------------------------------ 67.0/317.3 MB 4.7 MB/s eta 0:00:54
-------- ------------------------------ 67.3/317.3 MB 4.7 MB/s eta 0:00:54
-------- ------------------------------ 67.6/317.3 MB 4.8 MB/s eta 0:00:53
-------- ------------------------------ 68.0/317.3 MB 4.9 MB/s eta 0:00:52
-------- ------------------------------ 68.2/317.3 MB 5.0 MB/s eta 0:00:51
-------- ------------------------------ 68.4/317.3 MB 4.8 MB/s eta 0:00:52
-------- ------------------------------ 68.6/317.3 MB 4.9 MB/s eta 0:00:52
-------- ------------------------------ 68.7/317.3 MB 4.9 MB/s eta 0:00:52
-------- ------------------------------ 68.8/317.3 MB 4.7 MB/s eta 0:00:54
-------- ------------------------------ 69.0/317.3 MB 4.8 MB/s eta 0:00:52
-------- ------------------------------ 69.2/317.3 MB 4.8 MB/s eta 0:00:52
-------- ------------------------------ 69.5/317.3 MB 4.9 MB/s eta 0:00:51
-------- ------------------------------ 69.7/317.3 MB 4.9 MB/s eta 0:00:51
-------- ------------------------------ 69.9/317.3 MB 5.0 MB/s eta 0:00:50
-------- ------------------------------ 70.0/317.3 MB 5.0 MB/s eta 0:00:50
-------- ------------------------------ 70.2/317.3 MB 5.0 MB/s eta 0:00:50
-------- ------------------------------ 70.3/317.3 MB 5.0 MB/s eta 0:00:50
-------- ------------------------------ 70.3/317.3 MB 4.8 MB/s eta 0:00:52
-------- ------------------------------ 70.4/317.3 MB 4.8 MB/s eta 0:00:52
-------- ------------------------------ 70.5/317.3 MB 4.7 MB/s eta 0:00:53
-------- ------------------------------ 70.7/317.3 MB 4.6 MB/s eta 0:00:54
-------- ------------------------------ 70.7/317.3 MB 4.6 MB/s eta 0:00:54
-------- ------------------------------ 70.7/317.3 MB 4.6 MB/s eta 0:00:54
-------- ------------------------------ 71.4/317.3 MB 4.8 MB/s eta 0:00:52
-------- ------------------------------ 71.6/317.3 MB 4.8 MB/s eta 0:00:52
-------- ------------------------------ 71.9/317.3 MB 4.9 MB/s eta 0:00:51
-------- ------------------------------ 72.0/317.3 MB 4.8 MB/s eta 0:00:52
-------- ------------------------------ 72.2/317.3 MB 4.7 MB/s eta 0:00:52
-------- ------------------------------ 72.5/317.3 MB 4.7 MB/s eta 0:00:52
-------- ------------------------------ 72.6/317.3 MB 4.7 MB/s eta 0:00:52
-------- ------------------------------ 73.0/317.3 MB 4.7 MB/s eta 0:00:52
-------- ------------------------------ 73.2/317.3 MB 4.7 MB/s eta 0:00:52
--------- ----------------------------- 73.4/317.3 MB 4.7 MB/s eta 0:00:53
--------- ----------------------------- 73.7/317.3 MB 4.7 MB/s eta 0:00:53
--------- ----------------------------- 73.9/317.3 MB 4.7 MB/s eta 0:00:53
--------- ----------------------------- 74.0/317.3 MB 4.6 MB/s eta 0:00:54
--------- ----------------------------- 74.2/317.3 MB 4.6 MB/s eta 0:00:54
--------- ----------------------------- 74.5/317.3 MB 4.6 MB/s eta 0:00:54
--------- ----------------------------- 74.7/317.3 MB 4.5 MB/s eta 0:00:54
--------- ----------------------------- 75.0/317.3 MB 4.5 MB/s eta 0:00:54
--------- ----------------------------- 75.3/317.3 MB 4.5 MB/s eta 0:00:54
--------- ----------------------------- 75.6/317.3 MB 4.5 MB/s eta 0:00:54
--------- ----------------------------- 75.7/317.3 MB 4.5 MB/s eta 0:00:55
--------- ----------------------------- 75.9/317.3 MB 4.5 MB/s eta 0:00:55
--------- ----------------------------- 76.2/317.3 MB 4.5 MB/s eta 0:00:54
--------- ----------------------------- 76.4/317.3 MB 4.5 MB/s eta 0:00:54
--------- ----------------------------- 76.6/317.3 MB 4.5 MB/s eta 0:00:55
--------- ----------------------------- 76.9/317.3 MB 4.4 MB/s eta 0:00:55
--------- ----------------------------- 77.2/317.3 MB 4.4 MB/s eta 0:00:55
--------- ----------------------------- 77.2/317.3 MB 4.4 MB/s eta 0:00:56
--------- ----------------------------- 77.3/317.3 MB 4.3 MB/s eta 0:00:56
--------- ----------------------------- 77.6/317.3 MB 4.3 MB/s eta 0:00:57
--------- ----------------------------- 77.8/317.3 MB 4.3 MB/s eta 0:00:56
--------- ----------------------------- 78.0/317.3 MB 4.2 MB/s eta 0:00:57
--------- ----------------------------- 78.2/317.3 MB 4.2 MB/s eta 0:00:58
--------- ----------------------------- 78.4/317.3 MB 4.1 MB/s eta 0:00:58
--------- ----------------------------- 78.7/317.3 MB 4.2 MB/s eta 0:00:57
--------- ----------------------------- 78.9/317.3 MB 4.2 MB/s eta 0:00:57
--------- ----------------------------- 79.3/317.3 MB 4.4 MB/s eta 0:00:55
--------- ----------------------------- 79.5/317.3 MB 4.4 MB/s eta 0:00:55
--------- ----------------------------- 79.8/317.3 MB 4.4 MB/s eta 0:00:55
--------- ----------------------------- 80.1/317.3 MB 4.5 MB/s eta 0:00:53
--------- ----------------------------- 80.4/317.3 MB 4.5 MB/s eta 0:00:53
--------- ----------------------------- 80.6/317.3 MB 4.6 MB/s eta 0:00:51
--------- ----------------------------- 80.9/317.3 MB 4.9 MB/s eta 0:00:49
--------- ----------------------------- 81.2/317.3 MB 5.0 MB/s eta 0:00:47
--------- ----------------------------- 81.2/317.3 MB 4.9 MB/s eta 0:00:48
---------- ---------------------------- 81.4/317.3 MB 4.9 MB/s eta 0:00:49
---------- ---------------------------- 81.7/317.3 MB 4.8 MB/s eta 0:00:49
---------- ---------------------------- 82.0/317.3 MB 4.8 MB/s eta 0:00:49
---------- ---------------------------- 82.3/317.3 MB 4.9 MB/s eta 0:00:48
---------- ---------------------------- 82.6/317.3 MB 5.0 MB/s eta 0:00:48
---------- ---------------------------- 82.7/317.3 MB 4.9 MB/s eta 0:00:49
---------- ---------------------------- 82.9/317.3 MB 4.9 MB/s eta 0:00:48
---------- ---------------------------- 83.2/317.3 MB 4.9 MB/s eta 0:00:48
---------- ---------------------------- 83.5/317.3 MB 5.0 MB/s eta 0:00:47
---------- ---------------------------- 83.8/317.3 MB 5.0 MB/s eta 0:00:47
---------- ---------------------------- 83.9/317.3 MB 4.9 MB/s eta 0:00:49
---------- ---------------------------- 84.1/317.3 MB 4.8 MB/s eta 0:00:49
---------- ---------------------------- 84.3/317.3 MB 4.9 MB/s eta 0:00:48
---------- ---------------------------- 84.5/317.3 MB 4.9 MB/s eta 0:00:48
---------- ---------------------------- 84.7/317.3 MB 4.8 MB/s eta 0:00:48
---------- ---------------------------- 85.0/317.3 MB 4.9 MB/s eta 0:00:48
---------- ---------------------------- 85.2/317.3 MB 4.8 MB/s eta 0:00:48
---------- ---------------------------- 85.4/317.3 MB 4.9 MB/s eta 0:00:48
---------- ---------------------------- 85.7/317.3 MB 4.8 MB/s eta 0:00:49
---------- ---------------------------- 86.0/317.3 MB 4.9 MB/s eta 0:00:48
---------- ---------------------------- 86.3/317.3 MB 4.9 MB/s eta 0:00:47
---------- ---------------------------- 86.6/317.3 MB 5.0 MB/s eta 0:00:47
---------- ---------------------------- 86.7/317.3 MB 4.9 MB/s eta 0:00:47
---------- ---------------------------- 87.0/317.3 MB 5.0 MB/s eta 0:00:47
---------- ---------------------------- 87.3/317.3 MB 5.0 MB/s eta 0:00:47
---------- ---------------------------- 87.6/317.3 MB 5.2 MB/s eta 0:00:45
---------- ---------------------------- 87.9/317.3 MB 5.2 MB/s eta 0:00:45
---------- ---------------------------- 88.1/317.3 MB 5.2 MB/s eta 0:00:45
---------- ---------------------------- 88.2/317.3 MB 5.2 MB/s eta 0:00:45
---------- ---------------------------- 88.2/317.3 MB 5.2 MB/s eta 0:00:45
---------- ---------------------------- 88.3/317.3 MB 5.0 MB/s eta 0:00:46
---------- ---------------------------- 88.3/317.3 MB 5.0 MB/s eta 0:00:46
---------- ---------------------------- 88.3/317.3 MB 4.7 MB/s eta 0:00:49
---------- ---------------------------- 88.3/317.3 MB 4.7 MB/s eta 0:00:49
---------- ---------------------------- 88.4/317.3 MB 4.6 MB/s eta 0:00:50
---------- ---------------------------- 88.7/317.3 MB 4.6 MB/s eta 0:00:50
---------- ---------------------------- 88.9/317.3 MB 4.6 MB/s eta 0:00:50
---------- ---------------------------- 89.0/317.3 MB 4.5 MB/s eta 0:00:51
---------- ---------------------------- 89.2/317.3 MB 4.5 MB/s eta 0:00:51
---------- ---------------------------- 89.4/317.3 MB 4.5 MB/s eta 0:00:52
----------- --------------------------- 89.7/317.3 MB 4.5 MB/s eta 0:00:51
----------- --------------------------- 90.0/317.3 MB 4.5 MB/s eta 0:00:51
----------- --------------------------- 90.2/317.3 MB 4.4 MB/s eta 0:00:52
----------- --------------------------- 90.5/317.3 MB 4.4 MB/s eta 0:00:52
----------- --------------------------- 90.8/317.3 MB 4.5 MB/s eta 0:00:51
----------- --------------------------- 91.1/317.3 MB 4.5 MB/s eta 0:00:51
----------- --------------------------- 91.3/317.3 MB 4.5 MB/s eta 0:00:51
----------- --------------------------- 91.6/317.3 MB 4.6 MB/s eta 0:00:50
----------- --------------------------- 91.9/317.3 MB 4.6 MB/s eta 0:00:50
----------- --------------------------- 92.3/317.3 MB 4.6 MB/s eta 0:00:49
----------- --------------------------- 92.5/317.3 MB 4.6 MB/s eta 0:00:50
----------- --------------------------- 92.7/317.3 MB 4.5 MB/s eta 0:00:50
----------- --------------------------- 93.0/317.3 MB 4.6 MB/s eta 0:00:49
----------- --------------------------- 93.3/317.3 MB 4.6 MB/s eta 0:00:49
----------- --------------------------- 93.6/317.3 MB 4.6 MB/s eta 0:00:49
----------- --------------------------- 94.0/317.3 MB 4.7 MB/s eta 0:00:48
----------- --------------------------- 94.3/317.3 MB 4.8 MB/s eta 0:00:47
----------- --------------------------- 94.7/317.3 MB 4.8 MB/s eta 0:00:47
----------- --------------------------- 95.0/317.3 MB 4.9 MB/s eta 0:00:46
----------- --------------------------- 95.3/317.3 MB 5.0 MB/s eta 0:00:45
----------- --------------------------- 95.5/317.3 MB 5.0 MB/s eta 0:00:45
----------- --------------------------- 95.7/317.3 MB 5.0 MB/s eta 0:00:45
----------- --------------------------- 96.1/317.3 MB 5.0 MB/s eta 0:00:45
----------- --------------------------- 96.2/317.3 MB 4.9 MB/s eta 0:00:45
----------- --------------------------- 96.3/317.3 MB 4.9 MB/s eta 0:00:46
----------- --------------------------- 96.3/317.3 MB 4.8 MB/s eta 0:00:47
----------- --------------------------- 96.3/317.3 MB 4.8 MB/s eta 0:00:47
----------- --------------------------- 96.5/317.3 MB 4.6 MB/s eta 0:00:49
----------- --------------------------- 96.6/317.3 MB 4.5 MB/s eta 0:00:49
----------- --------------------------- 96.8/317.3 MB 4.5 MB/s eta 0:00:49
----------- --------------------------- 96.8/317.3 MB 4.5 MB/s eta 0:00:49
----------- --------------------------- 96.8/317.3 MB 4.5 MB/s eta 0:00:49
----------- --------------------------- 97.5/317.3 MB 4.5 MB/s eta 0:00:49
------------ -------------------------- 97.7/317.3 MB 4.5 MB/s eta 0:00:49
------------ -------------------------- 98.0/317.3 MB 4.5 MB/s eta 0:00:49
------------ -------------------------- 98.2/317.3 MB 4.5 MB/s eta 0:00:50
------------ -------------------------- 98.4/317.3 MB 4.5 MB/s eta 0:00:49
------------ -------------------------- 98.5/317.3 MB 5.1 MB/s eta 0:00:43
------------ -------------------------- 98.7/317.3 MB 5.0 MB/s eta 0:00:44
------------ -------------------------- 98.9/317.3 MB 5.0 MB/s eta 0:00:44
------------ -------------------------- 99.1/317.3 MB 4.9 MB/s eta 0:00:45
------------ -------------------------- 99.3/317.3 MB 5.0 MB/s eta 0:00:44
------------ -------------------------- 99.5/317.3 MB 5.0 MB/s eta 0:00:44
------------ -------------------------- 99.7/317.3 MB 5.0 MB/s eta 0:00:44
----------- -------------------------- 100.0/317.3 MB 5.0 MB/s eta 0:00:44
----------- -------------------------- 100.0/317.3 MB 4.8 MB/s eta 0:00:45
------------ ------------------------- 100.2/317.3 MB 4.9 MB/s eta 0:00:45
------------ ------------------------- 100.2/317.3 MB 4.7 MB/s eta 0:00:46
------------ ------------------------- 100.3/317.3 MB 4.7 MB/s eta 0:00:47
------------ ------------------------- 100.4/317.3 MB 4.6 MB/s eta 0:00:47
------------ ------------------------- 100.5/317.3 MB 4.6 MB/s eta 0:00:48
------------ ------------------------- 100.6/317.3 MB 4.5 MB/s eta 0:00:48
------------ ------------------------- 100.7/317.3 MB 4.5 MB/s eta 0:00:49
------------ ------------------------- 100.8/317.3 MB 4.4 MB/s eta 0:00:49
------------ ------------------------- 100.9/317.3 MB 4.4 MB/s eta 0:00:49
------------ ------------------------- 101.0/317.3 MB 4.3 MB/s eta 0:00:51
------------ ------------------------- 101.2/317.3 MB 4.2 MB/s eta 0:00:52
------------ ------------------------- 101.3/317.3 MB 4.2 MB/s eta 0:00:52
------------ ------------------------- 101.4/317.3 MB 4.1 MB/s eta 0:00:53
------------ ------------------------- 101.6/317.3 MB 4.1 MB/s eta 0:00:53
------------ ------------------------- 101.8/317.3 MB 4.1 MB/s eta 0:00:53
------------ ------------------------- 102.0/317.3 MB 4.1 MB/s eta 0:00:53
------------ ------------------------- 102.1/317.3 MB 4.0 MB/s eta 0:00:54
------------ ------------------------- 102.2/317.3 MB 4.0 MB/s eta 0:00:54
------------ ------------------------- 102.2/317.3 MB 3.9 MB/s eta 0:00:55
------------ ------------------------- 102.5/317.3 MB 3.9 MB/s eta 0:00:55
------------ ------------------------- 102.7/317.3 MB 3.9 MB/s eta 0:00:55
------------ ------------------------- 103.0/317.3 MB 3.9 MB/s eta 0:00:56
------------ ------------------------- 103.2/317.3 MB 3.9 MB/s eta 0:00:55
------------ ------------------------- 103.3/317.3 MB 3.9 MB/s eta 0:00:56
------------ ------------------------- 103.6/317.3 MB 3.8 MB/s eta 0:00:57
------------ ------------------------- 103.8/317.3 MB 3.8 MB/s eta 0:00:56
------------ ------------------------- 103.9/317.3 MB 3.8 MB/s eta 0:00:56
------------ ------------------------- 104.3/317.3 MB 3.8 MB/s eta 0:00:57
------------ ------------------------- 104.4/317.3 MB 3.8 MB/s eta 0:00:57
------------ ------------------------- 104.5/317.3 MB 3.7 MB/s eta 0:00:58
------------ ------------------------- 104.8/317.3 MB 3.7 MB/s eta 0:00:58
------------ ------------------------- 105.0/317.3 MB 3.7 MB/s eta 0:00:58
------------ ------------------------- 105.1/317.3 MB 3.6 MB/s eta 0:00:59
------------ ------------------------- 105.2/317.3 MB 3.6 MB/s eta 0:00:59
------------ ------------------------- 105.2/317.3 MB 3.6 MB/s eta 0:00:59
------------ ------------------------- 105.2/317.3 MB 3.6 MB/s eta 0:00:59
------------ ------------------------- 105.8/317.3 MB 3.5 MB/s eta 0:01:00
------------ ------------------------- 105.8/317.3 MB 3.5 MB/s eta 0:01:01
------------ ------------------------- 105.9/317.3 MB 3.4 MB/s eta 0:01:02
------------ ------------------------- 106.1/317.3 MB 3.4 MB/s eta 0:01:02
------------ ------------------------- 106.2/317.3 MB 3.4 MB/s eta 0:01:02
------------ ------------------------- 106.4/317.3 MB 3.4 MB/s eta 0:01:03
------------ ------------------------- 106.6/317.3 MB 3.5 MB/s eta 0:01:00
------------ ------------------------- 106.7/317.3 MB 3.5 MB/s eta 0:01:00
------------ ------------------------- 106.9/317.3 MB 3.5 MB/s eta 0:01:01
------------ ------------------------- 107.0/317.3 MB 3.5 MB/s eta 0:01:01
------------ ------------------------- 107.0/317.3 MB 3.5 MB/s eta 0:01:01
------------ ------------------------- 107.2/317.3 MB 3.5 MB/s eta 0:01:00
------------ ------------------------- 107.4/317.3 MB 3.5 MB/s eta 0:01:01
------------ ------------------------- 107.5/317.3 MB 3.4 MB/s eta 0:01:02
------------ ------------------------- 107.5/317.3 MB 3.4 MB/s eta 0:01:03
------------ ------------------------- 107.8/317.3 MB 3.3 MB/s eta 0:01:03
------------ ------------------------- 107.9/317.3 MB 3.4 MB/s eta 0:01:03
------------ ------------------------- 107.9/317.3 MB 3.3 MB/s eta 0:01:05
------------ ------------------------- 108.1/317.3 MB 3.3 MB/s eta 0:01:05
------------ ------------------------- 108.3/317.3 MB 3.2 MB/s eta 0:01:05
------------ ------------------------- 108.3/317.3 MB 3.2 MB/s eta 0:01:05
------------- ------------------------ 108.6/317.3 MB 3.2 MB/s eta 0:01:06
------------- ------------------------ 108.8/317.3 MB 3.2 MB/s eta 0:01:06
------------- ------------------------ 108.8/317.3 MB 3.2 MB/s eta 0:01:06
------------- ------------------------ 109.1/317.3 MB 3.2 MB/s eta 0:01:06
------------- ------------------------ 109.2/317.3 MB 3.2 MB/s eta 0:01:06
------------- ------------------------ 109.2/317.3 MB 3.2 MB/s eta 0:01:06
------------- ------------------------ 109.4/317.3 MB 3.1 MB/s eta 0:01:08
------------- ------------------------ 109.6/317.3 MB 3.1 MB/s eta 0:01:07
------------- ------------------------ 109.6/317.3 MB 3.1 MB/s eta 0:01:07
------------- ------------------------ 109.8/317.3 MB 3.1 MB/s eta 0:01:08
------------- ------------------------ 110.1/317.3 MB 3.1 MB/s eta 0:01:08
------------- ------------------------ 110.1/317.3 MB 3.1 MB/s eta 0:01:08
------------- ------------------------ 110.1/317.3 MB 3.0 MB/s eta 0:01:09
------------- ------------------------ 110.5/317.3 MB 3.1 MB/s eta 0:01:08
------------- ------------------------ 110.5/317.3 MB 3.1 MB/s eta 0:01:06
------------- ------------------------ 110.6/317.3 MB 3.1 MB/s eta 0:01:08
------------- ------------------------ 110.9/317.3 MB 3.2 MB/s eta 0:01:05
------------- ------------------------ 111.0/317.3 MB 3.2 MB/s eta 0:01:05
------------- ------------------------ 111.0/317.3 MB 3.1 MB/s eta 0:01:06
------------- ------------------------ 111.2/317.3 MB 3.2 MB/s eta 0:01:04
------------- ------------------------ 111.4/317.3 MB 3.2 MB/s eta 0:01:05
------------- ------------------------ 111.7/317.3 MB 3.3 MB/s eta 0:01:03
------------- ------------------------ 112.0/317.3 MB 3.3 MB/s eta 0:01:03
------------- ------------------------ 112.2/317.3 MB 3.3 MB/s eta 0:01:03
------------- ------------------------ 112.4/317.3 MB 3.3 MB/s eta 0:01:02
------------- ------------------------ 112.6/317.3 MB 3.4 MB/s eta 0:01:01
------------- ------------------------ 112.9/317.3 MB 3.4 MB/s eta 0:01:01
------------- ------------------------ 113.1/317.3 MB 3.4 MB/s eta 0:01:01
------------- ------------------------ 113.1/317.3 MB 3.4 MB/s eta 0:01:01
------------- ------------------------ 113.1/317.3 MB 3.3 MB/s eta 0:01:02
------------- ------------------------ 113.2/317.3 MB 3.3 MB/s eta 0:01:03
------------- ------------------------ 113.3/317.3 MB 3.2 MB/s eta 0:01:04
------------- ------------------------ 113.6/317.3 MB 3.2 MB/s eta 0:01:03
------------- ------------------------ 113.8/317.3 MB 3.2 MB/s eta 0:01:03
------------- ------------------------ 113.9/317.3 MB 3.2 MB/s eta 0:01:04
------------- ------------------------ 114.1/317.3 MB 3.2 MB/s eta 0:01:03
------------- ------------------------ 114.4/317.3 MB 3.2 MB/s eta 0:01:03
------------- ------------------------ 114.5/317.3 MB 3.2 MB/s eta 0:01:04
------------- ------------------------ 114.8/317.3 MB 3.3 MB/s eta 0:01:03
------------- ------------------------ 115.0/317.3 MB 3.2 MB/s eta 0:01:03
------------- ------------------------ 115.1/317.3 MB 3.2 MB/s eta 0:01:03
------------- ------------------------ 115.3/317.3 MB 3.2 MB/s eta 0:01:03
------------- ------------------------ 115.7/317.3 MB 3.4 MB/s eta 0:01:00
------------- ------------------------ 115.8/317.3 MB 3.3 MB/s eta 0:01:01
------------- ------------------------ 116.0/317.3 MB 3.3 MB/s eta 0:01:02
------------- ------------------------ 116.4/317.3 MB 3.4 MB/s eta 0:01:00
------------- ------------------------ 116.6/317.3 MB 3.4 MB/s eta 0:00:59
------------- ------------------------ 116.9/317.3 MB 3.4 MB/s eta 0:00:59
-------------- ----------------------- 117.1/317.3 MB 3.5 MB/s eta 0:00:58
-------------- ----------------------- 117.3/317.3 MB 3.5 MB/s eta 0:00:57
-------------- ----------------------- 117.5/317.3 MB 3.6 MB/s eta 0:00:56
-------------- ----------------------- 117.7/317.3 MB 3.6 MB/s eta 0:00:56
-------------- ----------------------- 117.9/317.3 MB 3.6 MB/s eta 0:00:55
-------------- ----------------------- 118.2/317.3 MB 3.7 MB/s eta 0:00:54
-------------- ----------------------- 118.4/317.3 MB 3.7 MB/s eta 0:00:54
-------------- ----------------------- 118.6/317.3 MB 3.9 MB/s eta 0:00:52
-------------- ----------------------- 118.8/317.3 MB 3.8 MB/s eta 0:00:53
-------------- ----------------------- 119.0/317.3 MB 3.8 MB/s eta 0:00:53
-------------- ----------------------- 119.1/317.3 MB 3.9 MB/s eta 0:00:51
-------------- ----------------------- 119.2/317.3 MB 3.8 MB/s eta 0:00:53
-------------- ----------------------- 119.4/317.3 MB 3.9 MB/s eta 0:00:51
-------------- ----------------------- 119.7/317.3 MB 3.9 MB/s eta 0:00:51
-------------- ----------------------- 120.0/317.3 MB 4.0 MB/s eta 0:00:50
-------------- ----------------------- 120.1/317.3 MB 4.0 MB/s eta 0:00:50
-------------- ----------------------- 120.1/317.3 MB 3.9 MB/s eta 0:00:51
-------------- ----------------------- 120.4/317.3 MB 4.0 MB/s eta 0:00:49
-------------- ----------------------- 120.6/317.3 MB 4.0 MB/s eta 0:00:49
-------------- ----------------------- 120.8/317.3 MB 4.1 MB/s eta 0:00:48
-------------- ----------------------- 121.0/317.3 MB 4.1 MB/s eta 0:00:48
-------------- ----------------------- 121.3/317.3 MB 4.2 MB/s eta 0:00:47
-------------- ----------------------- 121.5/317.3 MB 4.3 MB/s eta 0:00:47
-------------- ----------------------- 121.7/317.3 MB 4.2 MB/s eta 0:00:47
-------------- ----------------------- 121.9/317.3 MB 4.2 MB/s eta 0:00:47
-------------- ----------------------- 122.1/317.3 MB 4.2 MB/s eta 0:00:47
-------------- ----------------------- 122.4/317.3 MB 4.2 MB/s eta 0:00:47
-------------- ----------------------- 122.6/317.3 MB 4.2 MB/s eta 0:00:47
-------------- ----------------------- 122.8/317.3 MB 4.2 MB/s eta 0:00:47
-------------- ----------------------- 123.1/317.3 MB 4.2 MB/s eta 0:00:47
-------------- ----------------------- 123.3/317.3 MB 4.2 MB/s eta 0:00:46
-------------- ----------------------- 123.5/317.3 MB 4.4 MB/s eta 0:00:44
-------------- ----------------------- 123.6/317.3 MB 4.4 MB/s eta 0:00:45
-------------- ----------------------- 123.8/317.3 MB 4.3 MB/s eta 0:00:45
-------------- ----------------------- 123.9/317.3 MB 4.4 MB/s eta 0:00:45
-------------- ----------------------- 124.1/317.3 MB 4.3 MB/s eta 0:00:45
-------------- ----------------------- 124.3/317.3 MB 4.3 MB/s eta 0:00:45
-------------- ----------------------- 124.5/317.3 MB 4.3 MB/s eta 0:00:45
-------------- ----------------------- 124.7/317.3 MB 4.3 MB/s eta 0:00:45
-------------- ----------------------- 124.9/317.3 MB 4.3 MB/s eta 0:00:45
-------------- ----------------------- 125.0/317.3 MB 4.3 MB/s eta 0:00:45
-------------- ----------------------- 125.2/317.3 MB 4.3 MB/s eta 0:00:46
--------------- ---------------------- 125.3/317.3 MB 4.3 MB/s eta 0:00:46
--------------- ---------------------- 125.5/317.3 MB 4.3 MB/s eta 0:00:46
--------------- ---------------------- 125.6/317.3 MB 4.2 MB/s eta 0:00:46
--------------- ---------------------- 125.8/317.3 MB 4.1 MB/s eta 0:00:47
--------------- ---------------------- 126.0/317.3 MB 4.1 MB/s eta 0:00:47
--------------- ---------------------- 126.1/317.3 MB 4.1 MB/s eta 0:00:47
--------------- ---------------------- 126.3/317.3 MB 4.1 MB/s eta 0:00:47
--------------- ---------------------- 126.4/317.3 MB 4.1 MB/s eta 0:00:47
--------------- ---------------------- 126.7/317.3 MB 4.1 MB/s eta 0:00:47
--------------- ---------------------- 126.8/317.3 MB 4.0 MB/s eta 0:00:48
--------------- ---------------------- 127.0/317.3 MB 4.0 MB/s eta 0:00:48
--------------- ---------------------- 127.2/317.3 MB 4.0 MB/s eta 0:00:48
--------------- ---------------------- 127.3/317.3 MB 4.0 MB/s eta 0:00:48
--------------- ---------------------- 127.5/317.3 MB 4.0 MB/s eta 0:00:48
--------------- ---------------------- 127.7/317.3 MB 4.0 MB/s eta 0:00:48
--------------- ---------------------- 127.9/317.3 MB 4.0 MB/s eta 0:00:48
--------------- ---------------------- 127.9/317.3 MB 3.9 MB/s eta 0:00:49
--------------- ---------------------- 128.1/317.3 MB 3.9 MB/s eta 0:00:49
--------------- ---------------------- 128.3/317.3 MB 3.9 MB/s eta 0:00:49
--------------- ---------------------- 128.4/317.3 MB 3.9 MB/s eta 0:00:49
--------------- ---------------------- 128.5/317.3 MB 3.8 MB/s eta 0:00:50
--------------- ---------------------- 128.7/317.3 MB 3.8 MB/s eta 0:00:50
--------------- ---------------------- 128.9/317.3 MB 3.8 MB/s eta 0:00:50
--------------- ---------------------- 129.1/317.3 MB 3.8 MB/s eta 0:00:50
--------------- ---------------------- 129.3/317.3 MB 3.8 MB/s eta 0:00:50
--------------- ---------------------- 129.5/317.3 MB 3.9 MB/s eta 0:00:49
--------------- ---------------------- 129.7/317.3 MB 3.9 MB/s eta 0:00:49
--------------- ---------------------- 129.7/317.3 MB 3.8 MB/s eta 0:00:50
--------------- ---------------------- 130.0/317.3 MB 3.8 MB/s eta 0:00:50
--------------- ---------------------- 130.2/317.3 MB 3.8 MB/s eta 0:00:50
--------------- ---------------------- 130.3/317.3 MB 3.9 MB/s eta 0:00:49
--------------- ---------------------- 130.5/317.3 MB 3.8 MB/s eta 0:00:50
--------------- ---------------------- 130.7/317.3 MB 3.8 MB/s eta 0:00:50
--------------- ---------------------- 130.9/317.3 MB 3.8 MB/s eta 0:00:50
--------------- ---------------------- 131.0/317.3 MB 3.8 MB/s eta 0:00:50
--------------- ---------------------- 131.2/317.3 MB 3.8 MB/s eta 0:00:50
--------------- ---------------------- 131.3/317.3 MB 3.7 MB/s eta 0:00:50
--------------- ---------------------- 131.5/317.3 MB 3.7 MB/s eta 0:00:50
--------------- ---------------------- 131.7/317.3 MB 3.7 MB/s eta 0:00:51
--------------- ---------------------- 131.9/317.3 MB 3.7 MB/s eta 0:00:51
--------------- ---------------------- 132.1/317.3 MB 3.7 MB/s eta 0:00:51
--------------- ---------------------- 132.3/317.3 MB 3.7 MB/s eta 0:00:51
--------------- ---------------------- 132.5/317.3 MB 3.7 MB/s eta 0:00:51
--------------- ---------------------- 132.6/317.3 MB 3.7 MB/s eta 0:00:51
--------------- ---------------------- 132.9/317.3 MB 3.7 MB/s eta 0:00:51
--------------- ---------------------- 133.1/317.3 MB 3.7 MB/s eta 0:00:51
--------------- ---------------------- 133.2/317.3 MB 3.6 MB/s eta 0:00:51
--------------- ---------------------- 133.4/317.3 MB 3.6 MB/s eta 0:00:52
--------------- ---------------------- 133.4/317.3 MB 3.6 MB/s eta 0:00:52
---------------- --------------------- 133.7/317.3 MB 3.5 MB/s eta 0:00:52
---------------- --------------------- 133.8/317.3 MB 3.6 MB/s eta 0:00:52
---------------- --------------------- 134.1/317.3 MB 3.6 MB/s eta 0:00:51
---------------- --------------------- 134.2/317.3 MB 3.6 MB/s eta 0:00:51
---------------- --------------------- 134.4/317.3 MB 3.6 MB/s eta 0:00:51
---------------- --------------------- 134.5/317.3 MB 3.6 MB/s eta 0:00:52
---------------- --------------------- 134.7/317.3 MB 3.6 MB/s eta 0:00:52
---------------- --------------------- 134.9/317.3 MB 3.6 MB/s eta 0:00:51
---------------- --------------------- 135.1/317.3 MB 3.6 MB/s eta 0:00:51
---------------- --------------------- 135.4/317.3 MB 3.6 MB/s eta 0:00:51
---------------- --------------------- 135.5/317.3 MB 3.6 MB/s eta 0:00:51
---------------- --------------------- 135.6/317.3 MB 3.6 MB/s eta 0:00:51
---------------- --------------------- 135.8/317.3 MB 3.6 MB/s eta 0:00:51
---------------- --------------------- 136.0/317.3 MB 3.7 MB/s eta 0:00:50
---------------- --------------------- 136.3/317.3 MB 3.7 MB/s eta 0:00:49
---------------- --------------------- 136.4/317.3 MB 3.7 MB/s eta 0:00:50
---------------- --------------------- 136.5/317.3 MB 3.7 MB/s eta 0:00:50
---------------- --------------------- 136.8/317.3 MB 3.7 MB/s eta 0:00:50
---------------- --------------------- 137.0/317.3 MB 3.7 MB/s eta 0:00:50
---------------- --------------------- 137.1/317.3 MB 3.7 MB/s eta 0:00:49
---------------- --------------------- 137.3/317.3 MB 3.7 MB/s eta 0:00:49
---------------- --------------------- 137.5/317.3 MB 3.7 MB/s eta 0:00:50
---------------- --------------------- 137.8/317.3 MB 3.7 MB/s eta 0:00:49
---------------- --------------------- 138.0/317.3 MB 3.7 MB/s eta 0:00:49
---------------- --------------------- 138.2/317.3 MB 3.8 MB/s eta 0:00:48
---------------- --------------------- 138.4/317.3 MB 3.8 MB/s eta 0:00:48
---------------- --------------------- 138.7/317.3 MB 3.9 MB/s eta 0:00:46
---------------- --------------------- 138.9/317.3 MB 3.9 MB/s eta 0:00:46
---------------- --------------------- 139.0/317.3 MB 3.9 MB/s eta 0:00:46
---------------- --------------------- 139.1/317.3 MB 3.8 MB/s eta 0:00:47
---------------- --------------------- 139.3/317.3 MB 3.8 MB/s eta 0:00:47
---------------- --------------------- 139.5/317.3 MB 3.8 MB/s eta 0:00:47
---------------- --------------------- 139.8/317.3 MB 3.8 MB/s eta 0:00:47
---------------- --------------------- 140.0/317.3 MB 3.9 MB/s eta 0:00:46
---------------- --------------------- 140.2/317.3 MB 3.9 MB/s eta 0:00:46
---------------- --------------------- 140.3/317.3 MB 3.9 MB/s eta 0:00:46
---------------- --------------------- 140.6/317.3 MB 3.9 MB/s eta 0:00:46
---------------- --------------------- 140.8/317.3 MB 3.9 MB/s eta 0:00:46
---------------- --------------------- 140.8/317.3 MB 3.9 MB/s eta 0:00:46
---------------- --------------------- 140.9/317.3 MB 3.8 MB/s eta 0:00:47
---------------- --------------------- 141.2/317.3 MB 3.9 MB/s eta 0:00:46
---------------- --------------------- 141.4/317.3 MB 3.9 MB/s eta 0:00:46
---------------- --------------------- 141.6/317.3 MB 3.9 MB/s eta 0:00:46
---------------- --------------------- 141.8/317.3 MB 3.9 MB/s eta 0:00:46
----------------- -------------------- 142.1/317.3 MB 4.0 MB/s eta 0:00:45
----------------- -------------------- 142.2/317.3 MB 3.9 MB/s eta 0:00:45
----------------- -------------------- 142.4/317.3 MB 3.9 MB/s eta 0:00:45
----------------- -------------------- 142.6/317.3 MB 3.9 MB/s eta 0:00:45
----------------- -------------------- 142.9/317.3 MB 4.0 MB/s eta 0:00:44
----------------- -------------------- 143.1/317.3 MB 4.0 MB/s eta 0:00:44
----------------- -------------------- 143.3/317.3 MB 3.9 MB/s eta 0:00:45
----------------- -------------------- 143.4/317.3 MB 3.9 MB/s eta 0:00:45
----------------- -------------------- 143.5/317.3 MB 3.9 MB/s eta 0:00:45
----------------- -------------------- 143.6/317.3 MB 3.9 MB/s eta 0:00:45
----------------- -------------------- 143.7/317.3 MB 3.9 MB/s eta 0:00:45
----------------- -------------------- 143.9/317.3 MB 3.9 MB/s eta 0:00:45
----------------- -------------------- 144.2/317.3 MB 3.9 MB/s eta 0:00:45
----------------- -------------------- 144.4/317.3 MB 3.9 MB/s eta 0:00:45
----------------- -------------------- 144.5/317.3 MB 4.0 MB/s eta 0:00:44
----------------- -------------------- 144.8/317.3 MB 4.0 MB/s eta 0:00:44
----------------- -------------------- 145.1/317.3 MB 4.0 MB/s eta 0:00:44
----------------- -------------------- 145.2/317.3 MB 4.0 MB/s eta 0:00:44
----------------- -------------------- 145.3/317.3 MB 3.9 MB/s eta 0:00:44
----------------- -------------------- 145.5/317.3 MB 3.9 MB/s eta 0:00:44
----------------- -------------------- 145.7/317.3 MB 4.0 MB/s eta 0:00:44
----------------- -------------------- 145.9/317.3 MB 3.9 MB/s eta 0:00:44
----------------- -------------------- 146.1/317.3 MB 4.0 MB/s eta 0:00:44
----------------- -------------------- 146.2/317.3 MB 3.9 MB/s eta 0:00:44
----------------- -------------------- 146.4/317.3 MB 3.9 MB/s eta 0:00:44
----------------- -------------------- 146.7/317.3 MB 3.9 MB/s eta 0:00:44
----------------- -------------------- 146.9/317.3 MB 4.0 MB/s eta 0:00:43
----------------- -------------------- 147.1/317.3 MB 4.0 MB/s eta 0:00:43
----------------- -------------------- 147.3/317.3 MB 4.0 MB/s eta 0:00:43
----------------- -------------------- 147.6/317.3 MB 4.1 MB/s eta 0:00:42
----------------- -------------------- 147.8/317.3 MB 4.1 MB/s eta 0:00:42
----------------- -------------------- 148.0/317.3 MB 4.0 MB/s eta 0:00:42
----------------- -------------------- 148.0/317.3 MB 4.0 MB/s eta 0:00:42
----------------- -------------------- 148.0/317.3 MB 4.0 MB/s eta 0:00:42
----------------- -------------------- 148.2/317.3 MB 3.9 MB/s eta 0:00:44
----------------- -------------------- 148.8/317.3 MB 4.0 MB/s eta 0:00:43
----------------- -------------------- 149.0/317.3 MB 4.0 MB/s eta 0:00:43
----------------- -------------------- 149.2/317.3 MB 4.0 MB/s eta 0:00:43
----------------- -------------------- 149.4/317.3 MB 4.1 MB/s eta 0:00:42
----------------- -------------------- 149.6/317.3 MB 4.1 MB/s eta 0:00:41
----------------- -------------------- 149.9/317.3 MB 4.1 MB/s eta 0:00:41
----------------- -------------------- 150.0/317.3 MB 4.1 MB/s eta 0:00:42
----------------- -------------------- 150.2/317.3 MB 4.0 MB/s eta 0:00:42
------------------ ------------------- 150.4/317.3 MB 4.0 MB/s eta 0:00:42
------------------ ------------------- 150.6/317.3 MB 4.1 MB/s eta 0:00:42
------------------ ------------------- 150.9/317.3 MB 4.1 MB/s eta 0:00:41
------------------ ------------------- 150.9/317.3 MB 4.0 MB/s eta 0:00:42
------------------ ------------------- 151.2/317.3 MB 4.2 MB/s eta 0:00:40
------------------ ------------------- 151.4/317.3 MB 4.2 MB/s eta 0:00:40
------------------ ------------------- 151.5/317.3 MB 4.1 MB/s eta 0:00:41
------------------ ------------------- 151.7/317.3 MB 4.1 MB/s eta 0:00:41
------------------ ------------------- 152.0/317.3 MB 4.1 MB/s eta 0:00:41
------------------ ------------------- 152.2/317.3 MB 4.1 MB/s eta 0:00:41
------------------ ------------------- 152.4/317.3 MB 4.1 MB/s eta 0:00:41
------------------ ------------------- 152.6/317.3 MB 4.1 MB/s eta 0:00:41
------------------ ------------------- 152.6/317.3 MB 4.1 MB/s eta 0:00:41
------------------ ------------------- 152.8/317.3 MB 4.1 MB/s eta 0:00:41
------------------ ------------------- 153.1/317.3 MB 4.0 MB/s eta 0:00:41
------------------ ------------------- 153.1/317.3 MB 4.0 MB/s eta 0:00:41
------------------ ------------------- 153.1/317.3 MB 4.0 MB/s eta 0:00:41
------------------ ------------------- 153.1/317.3 MB 4.0 MB/s eta 0:00:41
------------------ ------------------- 153.1/317.3 MB 4.0 MB/s eta 0:00:41
------------------ ------------------- 153.1/317.3 MB 4.0 MB/s eta 0:00:41
------------------ ------------------- 153.1/317.3 MB 4.0 MB/s eta 0:00:41
------------------ ------------------- 153.1/317.3 MB 4.0 MB/s eta 0:00:41
------------------ ------------------- 153.7/317.3 MB 3.7 MB/s eta 0:00:45
------------------ ------------------- 153.7/317.3 MB 3.7 MB/s eta 0:00:45
------------------ ------------------- 153.9/317.3 MB 3.7 MB/s eta 0:00:44
------------------ ------------------- 154.0/317.3 MB 3.7 MB/s eta 0:00:44
------------------ ------------------- 154.2/317.3 MB 3.7 MB/s eta 0:00:44
------------------ ------------------- 154.3/317.3 MB 3.7 MB/s eta 0:00:45
------------------ ------------------- 154.4/317.3 MB 3.6 MB/s eta 0:00:45
------------------ ------------------- 154.6/317.3 MB 3.6 MB/s eta 0:00:45
------------------ ------------------- 154.7/317.3 MB 3.6 MB/s eta 0:00:46
------------------ ------------------- 154.9/317.3 MB 3.6 MB/s eta 0:00:46
------------------ ------------------- 155.0/317.3 MB 3.6 MB/s eta 0:00:46
------------------ ------------------- 155.1/317.3 MB 3.5 MB/s eta 0:00:46
------------------ ------------------- 155.2/317.3 MB 3.5 MB/s eta 0:00:47
------------------ ------------------- 155.4/317.3 MB 3.5 MB/s eta 0:00:47
------------------ ------------------- 155.6/317.3 MB 3.5 MB/s eta 0:00:46
------------------ ------------------- 155.7/317.3 MB 3.5 MB/s eta 0:00:46
------------------ ------------------- 155.8/317.3 MB 3.5 MB/s eta 0:00:47
------------------ ------------------- 156.0/317.3 MB 3.5 MB/s eta 0:00:47
------------------ ------------------- 156.0/317.3 MB 3.4 MB/s eta 0:00:48
------------------ ------------------- 156.1/317.3 MB 3.4 MB/s eta 0:00:48
------------------ ------------------- 156.3/317.3 MB 3.4 MB/s eta 0:00:48
------------------ ------------------- 156.5/317.3 MB 3.4 MB/s eta 0:00:48
------------------ ------------------- 156.5/317.3 MB 3.4 MB/s eta 0:00:48
------------------ ------------------- 156.7/317.3 MB 3.3 MB/s eta 0:00:49
------------------ ------------------- 156.8/317.3 MB 3.3 MB/s eta 0:00:49
------------------ ------------------- 156.9/317.3 MB 3.3 MB/s eta 0:00:49
------------------ ------------------- 157.1/317.3 MB 3.3 MB/s eta 0:00:49
------------------ ------------------- 157.3/317.3 MB 3.3 MB/s eta 0:00:49
------------------ ------------------- 157.5/317.3 MB 3.3 MB/s eta 0:00:49
------------------ ------------------- 157.7/317.3 MB 3.3 MB/s eta 0:00:49
------------------ ------------------- 157.9/317.3 MB 3.3 MB/s eta 0:00:49
------------------ ------------------- 158.1/317.3 MB 3.3 MB/s eta 0:00:49
------------------ ------------------- 158.3/317.3 MB 3.4 MB/s eta 0:00:47
------------------ ------------------- 158.6/317.3 MB 3.4 MB/s eta 0:00:48
------------------- ------------------ 158.7/317.3 MB 3.3 MB/s eta 0:00:49
------------------- ------------------ 158.8/317.3 MB 3.3 MB/s eta 0:00:49
------------------- ------------------ 159.0/317.3 MB 3.2 MB/s eta 0:00:49
------------------- ------------------ 159.1/317.3 MB 3.2 MB/s eta 0:00:50
------------------- ------------------ 159.4/317.3 MB 3.2 MB/s eta 0:00:49
------------------- ------------------ 159.5/317.3 MB 3.2 MB/s eta 0:00:50
------------------- ------------------ 159.7/317.3 MB 3.2 MB/s eta 0:00:50
------------------- ------------------ 159.8/317.3 MB 3.2 MB/s eta 0:00:50
------------------- ------------------ 160.1/317.3 MB 3.2 MB/s eta 0:00:49
------------------- ------------------ 160.2/317.3 MB 3.2 MB/s eta 0:00:49
------------------- ------------------ 160.4/317.3 MB 3.2 MB/s eta 0:00:50
------------------- ------------------ 160.6/317.3 MB 3.2 MB/s eta 0:00:50
------------------- ------------------ 160.8/317.3 MB 3.2 MB/s eta 0:00:50
------------------- ------------------ 161.0/317.3 MB 3.2 MB/s eta 0:00:50
------------------- ------------------ 161.2/317.3 MB 3.2 MB/s eta 0:00:49
------------------- ------------------ 161.4/317.3 MB 3.2 MB/s eta 0:00:49
------------------- ------------------ 161.5/317.3 MB 3.2 MB/s eta 0:00:50
------------------- ------------------ 161.8/317.3 MB 3.2 MB/s eta 0:00:49
------------------- ------------------ 161.9/317.3 MB 3.2 MB/s eta 0:00:49
------------------- ------------------ 162.2/317.3 MB 3.2 MB/s eta 0:00:49
------------------- ------------------ 162.3/317.3 MB 3.2 MB/s eta 0:00:49
------------------- ------------------ 162.6/317.3 MB 3.2 MB/s eta 0:00:49
------------------- ------------------ 162.6/317.3 MB 3.2 MB/s eta 0:00:49
------------------- ------------------ 162.7/317.3 MB 3.1 MB/s eta 0:00:50
------------------- ------------------ 163.1/317.3 MB 3.2 MB/s eta 0:00:49
------------------- ------------------ 163.2/317.3 MB 3.2 MB/s eta 0:00:49
------------------- ------------------ 163.5/317.3 MB 3.5 MB/s eta 0:00:44
------------------- ------------------ 163.6/317.3 MB 3.5 MB/s eta 0:00:45
------------------- ------------------ 163.9/317.3 MB 3.4 MB/s eta 0:00:45
------------------- ------------------ 164.0/317.3 MB 3.5 MB/s eta 0:00:45
------------------- ------------------ 164.2/317.3 MB 3.5 MB/s eta 0:00:44
------------------- ------------------ 164.5/317.3 MB 3.5 MB/s eta 0:00:44
------------------- ------------------ 164.6/317.3 MB 3.6 MB/s eta 0:00:43
------------------- ------------------ 164.8/317.3 MB 3.5 MB/s eta 0:00:44
------------------- ------------------ 165.1/317.3 MB 3.6 MB/s eta 0:00:43
------------------- ------------------ 165.3/317.3 MB 3.7 MB/s eta 0:00:42
------------------- ------------------ 165.6/317.3 MB 3.7 MB/s eta 0:00:42
------------------- ------------------ 165.8/317.3 MB 3.7 MB/s eta 0:00:41
------------------- ------------------ 166.0/317.3 MB 3.7 MB/s eta 0:00:41
------------------- ------------------ 166.1/317.3 MB 3.7 MB/s eta 0:00:41
------------------- ------------------ 166.4/317.3 MB 3.8 MB/s eta 0:00:40
------------------- ------------------ 166.5/317.3 MB 3.8 MB/s eta 0:00:40
------------------- ------------------ 166.7/317.3 MB 3.8 MB/s eta 0:00:40
------------------- ------------------ 166.9/317.3 MB 3.9 MB/s eta 0:00:39
-------------------- ----------------- 167.0/317.3 MB 3.9 MB/s eta 0:00:39
-------------------- ----------------- 167.3/317.3 MB 3.9 MB/s eta 0:00:39
-------------------- ----------------- 167.4/317.3 MB 4.0 MB/s eta 0:00:38
-------------------- ----------------- 167.6/317.3 MB 4.0 MB/s eta 0:00:38
-------------------- ----------------- 167.8/317.3 MB 4.0 MB/s eta 0:00:38
-------------------- ----------------- 168.0/317.3 MB 4.0 MB/s eta 0:00:38
-------------------- ----------------- 168.2/317.3 MB 3.9 MB/s eta 0:00:39
-------------------- ----------------- 168.4/317.3 MB 3.9 MB/s eta 0:00:38
-------------------- ----------------- 168.6/317.3 MB 3.9 MB/s eta 0:00:38
-------------------- ----------------- 168.7/317.3 MB 3.9 MB/s eta 0:00:39
-------------------- ----------------- 168.9/317.3 MB 3.9 MB/s eta 0:00:38
-------------------- ----------------- 169.2/317.3 MB 4.0 MB/s eta 0:00:38
-------------------- ----------------- 169.4/317.3 MB 4.0 MB/s eta 0:00:38
-------------------- ----------------- 169.6/317.3 MB 4.0 MB/s eta 0:00:38
-------------------- ----------------- 169.8/317.3 MB 4.0 MB/s eta 0:00:37
-------------------- ----------------- 170.1/317.3 MB 4.1 MB/s eta 0:00:37
-------------------- ----------------- 170.3/317.3 MB 4.0 MB/s eta 0:00:37
-------------------- ----------------- 170.5/317.3 MB 4.1 MB/s eta 0:00:36
-------------------- ----------------- 170.7/317.3 MB 4.1 MB/s eta 0:00:36
-------------------- ----------------- 170.9/317.3 MB 4.1 MB/s eta 0:00:36
-------------------- ----------------- 171.1/317.3 MB 4.1 MB/s eta 0:00:36
-------------------- ----------------- 171.3/317.3 MB 4.1 MB/s eta 0:00:36
-------------------- ----------------- 171.6/317.3 MB 4.1 MB/s eta 0:00:36
-------------------- ----------------- 171.8/317.3 MB 4.1 MB/s eta 0:00:36
-------------------- ----------------- 172.0/317.3 MB 4.1 MB/s eta 0:00:36
-------------------- ----------------- 172.1/317.3 MB 4.1 MB/s eta 0:00:36
-------------------- ----------------- 172.2/317.3 MB 4.1 MB/s eta 0:00:36
-------------------- ----------------- 172.4/317.3 MB 4.1 MB/s eta 0:00:36
-------------------- ----------------- 172.5/317.3 MB 4.1 MB/s eta 0:00:36
-------------------- ----------------- 172.7/317.3 MB 4.1 MB/s eta 0:00:36
-------------------- ----------------- 173.0/317.3 MB 4.3 MB/s eta 0:00:34
-------------------- ----------------- 173.0/317.3 MB 4.2 MB/s eta 0:00:35
-------------------- ----------------- 173.2/317.3 MB 4.1 MB/s eta 0:00:35
-------------------- ----------------- 173.3/317.3 MB 4.1 MB/s eta 0:00:36
-------------------- ----------------- 173.4/317.3 MB 4.1 MB/s eta 0:00:36
-------------------- ----------------- 173.5/317.3 MB 4.1 MB/s eta 0:00:36
-------------------- ----------------- 173.8/317.3 MB 4.1 MB/s eta 0:00:36
-------------------- ----------------- 173.9/317.3 MB 4.1 MB/s eta 0:00:36
-------------------- ----------------- 174.2/317.3 MB 4.1 MB/s eta 0:00:36
-------------------- ----------------- 174.3/317.3 MB 4.0 MB/s eta 0:00:36
-------------------- ----------------- 174.5/317.3 MB 4.0 MB/s eta 0:00:36
-------------------- ----------------- 174.7/317.3 MB 4.0 MB/s eta 0:00:36
-------------------- ----------------- 174.9/317.3 MB 4.0 MB/s eta 0:00:36
-------------------- ----------------- 175.0/317.3 MB 4.0 MB/s eta 0:00:36
-------------------- ----------------- 175.1/317.3 MB 4.0 MB/s eta 0:00:36
-------------------- ----------------- 175.3/317.3 MB 4.0 MB/s eta 0:00:36
--------------------- ---------------- 175.5/317.3 MB 3.9 MB/s eta 0:00:36
--------------------- ---------------- 175.7/317.3 MB 3.9 MB/s eta 0:00:37
--------------------- ---------------- 175.9/317.3 MB 3.9 MB/s eta 0:00:37
--------------------- ---------------- 175.9/317.3 MB 3.9 MB/s eta 0:00:37
--------------------- ---------------- 176.3/317.3 MB 3.9 MB/s eta 0:00:36
--------------------- ---------------- 176.3/317.3 MB 3.9 MB/s eta 0:00:37
--------------------- ---------------- 176.4/317.3 MB 3.9 MB/s eta 0:00:37
--------------------- ---------------- 176.7/317.3 MB 3.9 MB/s eta 0:00:37
--------------------- ---------------- 176.8/317.3 MB 3.9 MB/s eta 0:00:36
--------------------- ---------------- 177.0/317.3 MB 3.8 MB/s eta 0:00:37
--------------------- ---------------- 177.3/317.3 MB 3.8 MB/s eta 0:00:37
--------------------- ---------------- 177.3/317.3 MB 3.9 MB/s eta 0:00:37
--------------------- ---------------- 177.7/317.3 MB 3.9 MB/s eta 0:00:37
--------------------- ---------------- 177.8/317.3 MB 3.8 MB/s eta 0:00:37
--------------------- ---------------- 177.8/317.3 MB 3.8 MB/s eta 0:00:37
--------------------- ---------------- 178.1/317.3 MB 3.8 MB/s eta 0:00:37
--------------------- ---------------- 178.3/317.3 MB 3.8 MB/s eta 0:00:37
--------------------- ---------------- 178.3/317.3 MB 3.7 MB/s eta 0:00:38
--------------------- ---------------- 178.6/317.3 MB 3.8 MB/s eta 0:00:37
--------------------- ---------------- 178.8/317.3 MB 3.7 MB/s eta 0:00:38
--------------------- ---------------- 178.9/317.3 MB 3.7 MB/s eta 0:00:38
--------------------- ---------------- 179.1/317.3 MB 3.8 MB/s eta 0:00:37
--------------------- ---------------- 179.2/317.3 MB 3.7 MB/s eta 0:00:37
--------------------- ---------------- 179.3/317.3 MB 3.7 MB/s eta 0:00:38
--------------------- ---------------- 179.5/317.3 MB 3.7 MB/s eta 0:00:38
--------------------- ---------------- 179.5/317.3 MB 3.7 MB/s eta 0:00:38
--------------------- ---------------- 179.7/317.3 MB 3.6 MB/s eta 0:00:39
--------------------- ---------------- 179.9/317.3 MB 3.6 MB/s eta 0:00:38
--------------------- ---------------- 179.9/317.3 MB 3.6 MB/s eta 0:00:38
--------------------- ---------------- 180.0/317.3 MB 3.5 MB/s eta 0:00:40
--------------------- ---------------- 180.2/317.3 MB 3.5 MB/s eta 0:00:40
--------------------- ---------------- 180.3/317.3 MB 3.5 MB/s eta 0:00:40
--------------------- ---------------- 180.4/317.3 MB 3.4 MB/s eta 0:00:40
--------------------- ---------------- 180.7/317.3 MB 3.5 MB/s eta 0:00:40
--------------------- ---------------- 180.7/317.3 MB 3.4 MB/s eta 0:00:40
--------------------- ---------------- 180.8/317.3 MB 3.4 MB/s eta 0:00:41
--------------------- ---------------- 181.1/317.3 MB 3.4 MB/s eta 0:00:40
--------------------- ---------------- 181.1/317.3 MB 3.4 MB/s eta 0:00:40
--------------------- ---------------- 181.1/317.3 MB 3.4 MB/s eta 0:00:40
--------------------- ---------------- 181.5/317.3 MB 3.3 MB/s eta 0:00:41
--------------------- ---------------- 181.6/317.3 MB 3.3 MB/s eta 0:00:41
--------------------- ---------------- 181.6/317.3 MB 3.3 MB/s eta 0:00:41
--------------------- ---------------- 181.9/317.3 MB 3.3 MB/s eta 0:00:42
--------------------- ---------------- 182.0/317.3 MB 3.3 MB/s eta 0:00:41
--------------------- ---------------- 182.0/317.3 MB 3.3 MB/s eta 0:00:41
--------------------- ---------------- 182.2/317.3 MB 3.2 MB/s eta 0:00:42
--------------------- ---------------- 182.4/317.3 MB 3.2 MB/s eta 0:00:42
--------------------- ---------------- 182.4/317.3 MB 3.2 MB/s eta 0:00:42
--------------------- ---------------- 182.6/317.3 MB 3.2 MB/s eta 0:00:43
--------------------- ---------------- 182.9/317.3 MB 3.2 MB/s eta 0:00:42
--------------------- ---------------- 182.9/317.3 MB 3.2 MB/s eta 0:00:42
--------------------- ---------------- 182.9/317.3 MB 3.1 MB/s eta 0:00:43
--------------------- ---------------- 183.1/317.3 MB 3.1 MB/s eta 0:00:44
--------------------- ---------------- 183.2/317.3 MB 3.1 MB/s eta 0:00:44
--------------------- ---------------- 183.3/317.3 MB 3.1 MB/s eta 0:00:44
--------------------- ---------------- 183.6/317.3 MB 3.2 MB/s eta 0:00:43
--------------------- ---------------- 183.6/317.3 MB 3.2 MB/s eta 0:00:43
--------------------- ---------------- 183.6/317.3 MB 3.2 MB/s eta 0:00:43
---------------------- --------------- 183.8/317.3 MB 3.1 MB/s eta 0:00:44
---------------------- --------------- 184.0/317.3 MB 3.1 MB/s eta 0:00:44
---------------------- --------------- 184.1/317.3 MB 3.1 MB/s eta 0:00:44
---------------------- --------------- 184.3/317.3 MB 3.0 MB/s eta 0:00:44
---------------------- --------------- 184.5/317.3 MB 3.1 MB/s eta 0:00:44
---------------------- --------------- 184.6/317.3 MB 3.0 MB/s eta 0:00:44
---------------------- --------------- 184.8/317.3 MB 3.1 MB/s eta 0:00:44
---------------------- --------------- 184.9/317.3 MB 3.0 MB/s eta 0:00:44
---------------------- --------------- 185.1/317.3 MB 3.0 MB/s eta 0:00:45
---------------------- --------------- 185.2/317.3 MB 3.0 MB/s eta 0:00:44
---------------------- --------------- 185.3/317.3 MB 3.0 MB/s eta 0:00:44
---------------------- --------------- 185.4/317.3 MB 3.0 MB/s eta 0:00:44
---------------------- --------------- 185.5/317.3 MB 3.0 MB/s eta 0:00:44
---------------------- --------------- 185.7/317.3 MB 3.0 MB/s eta 0:00:45
---------------------- --------------- 185.9/317.3 MB 3.0 MB/s eta 0:00:44
---------------------- --------------- 186.1/317.3 MB 3.0 MB/s eta 0:00:45
---------------------- --------------- 186.2/317.3 MB 3.0 MB/s eta 0:00:44
---------------------- --------------- 186.3/317.3 MB 3.0 MB/s eta 0:00:44
---------------------- --------------- 186.5/317.3 MB 2.9 MB/s eta 0:00:45
---------------------- --------------- 186.7/317.3 MB 3.0 MB/s eta 0:00:44
---------------------- --------------- 186.9/317.3 MB 3.0 MB/s eta 0:00:44
---------------------- --------------- 187.2/317.3 MB 3.1 MB/s eta 0:00:43
---------------------- --------------- 187.4/317.3 MB 3.0 MB/s eta 0:00:43
---------------------- --------------- 187.5/317.3 MB 3.0 MB/s eta 0:00:43
---------------------- --------------- 187.7/317.3 MB 3.0 MB/s eta 0:00:43
---------------------- --------------- 187.9/317.3 MB 3.0 MB/s eta 0:00:43
---------------------- --------------- 188.2/317.3 MB 3.1 MB/s eta 0:00:42
---------------------- --------------- 188.3/317.3 MB 3.1 MB/s eta 0:00:42
---------------------- --------------- 188.4/317.3 MB 3.1 MB/s eta 0:00:42
---------------------- --------------- 188.7/317.3 MB 3.1 MB/s eta 0:00:42
---------------------- --------------- 188.9/317.3 MB 3.1 MB/s eta 0:00:42
---------------------- --------------- 189.1/317.3 MB 3.1 MB/s eta 0:00:42
---------------------- --------------- 189.3/317.3 MB 3.1 MB/s eta 0:00:42
---------------------- --------------- 189.5/317.3 MB 3.2 MB/s eta 0:00:41
---------------------- --------------- 189.8/317.3 MB 3.2 MB/s eta 0:00:40
---------------------- --------------- 190.0/317.3 MB 3.2 MB/s eta 0:00:40
---------------------- --------------- 190.3/317.3 MB 3.3 MB/s eta 0:00:39
---------------------- --------------- 190.5/317.3 MB 3.3 MB/s eta 0:00:38
---------------------- --------------- 190.7/317.3 MB 3.4 MB/s eta 0:00:38
---------------------- --------------- 190.9/317.3 MB 3.4 MB/s eta 0:00:38
---------------------- --------------- 191.2/317.3 MB 3.4 MB/s eta 0:00:37
---------------------- --------------- 191.4/317.3 MB 3.5 MB/s eta 0:00:36
---------------------- --------------- 191.6/317.3 MB 3.5 MB/s eta 0:00:36
---------------------- --------------- 191.9/317.3 MB 3.6 MB/s eta 0:00:35
----------------------- -------------- 192.1/317.3 MB 3.6 MB/s eta 0:00:35
----------------------- -------------- 192.3/317.3 MB 3.7 MB/s eta 0:00:34
----------------------- -------------- 192.4/317.3 MB 3.6 MB/s eta 0:00:35
----------------------- -------------- 192.6/317.3 MB 3.6 MB/s eta 0:00:35
----------------------- -------------- 192.7/317.3 MB 3.7 MB/s eta 0:00:34
----------------------- -------------- 193.0/317.3 MB 3.7 MB/s eta 0:00:34
----------------------- -------------- 193.3/317.3 MB 3.8 MB/s eta 0:00:33
----------------------- -------------- 193.5/317.3 MB 3.9 MB/s eta 0:00:32
----------------------- -------------- 193.6/317.3 MB 3.9 MB/s eta 0:00:32
----------------------- -------------- 193.9/317.3 MB 3.9 MB/s eta 0:00:33
----------------------- -------------- 194.0/317.3 MB 4.0 MB/s eta 0:00:31
----------------------- -------------- 194.3/317.3 MB 4.0 MB/s eta 0:00:31
----------------------- -------------- 194.5/317.3 MB 4.1 MB/s eta 0:00:30
----------------------- -------------- 194.8/317.3 MB 4.1 MB/s eta 0:00:30
----------------------- -------------- 195.0/317.3 MB 4.1 MB/s eta 0:00:30
----------------------- -------------- 195.3/317.3 MB 4.3 MB/s eta 0:00:29
----------------------- -------------- 195.5/317.3 MB 4.3 MB/s eta 0:00:29
----------------------- -------------- 195.7/317.3 MB 4.4 MB/s eta 0:00:28
----------------------- -------------- 195.8/317.3 MB 4.3 MB/s eta 0:00:29
----------------------- -------------- 196.0/317.3 MB 4.3 MB/s eta 0:00:29
----------------------- -------------- 196.2/317.3 MB 4.4 MB/s eta 0:00:28
----------------------- -------------- 196.3/317.3 MB 4.3 MB/s eta 0:00:29
----------------------- -------------- 196.6/317.3 MB 4.4 MB/s eta 0:00:28
----------------------- -------------- 196.7/317.3 MB 4.4 MB/s eta 0:00:28
----------------------- -------------- 196.9/317.3 MB 4.4 MB/s eta 0:00:28
----------------------- -------------- 197.1/317.3 MB 4.4 MB/s eta 0:00:28
----------------------- -------------- 197.2/317.3 MB 4.3 MB/s eta 0:00:28
----------------------- -------------- 197.4/317.3 MB 4.3 MB/s eta 0:00:28
----------------------- -------------- 197.6/317.3 MB 4.3 MB/s eta 0:00:28
----------------------- -------------- 197.6/317.3 MB 4.3 MB/s eta 0:00:29
----------------------- -------------- 197.7/317.3 MB 4.2 MB/s eta 0:00:29
----------------------- -------------- 197.8/317.3 MB 4.2 MB/s eta 0:00:29
----------------------- -------------- 198.0/317.3 MB 4.1 MB/s eta 0:00:29
----------------------- -------------- 198.2/317.3 MB 4.1 MB/s eta 0:00:29
----------------------- -------------- 198.2/317.3 MB 4.0 MB/s eta 0:00:30
----------------------- -------------- 198.4/317.3 MB 4.1 MB/s eta 0:00:30
----------------------- -------------- 198.6/317.3 MB 4.1 MB/s eta 0:00:29
----------------------- -------------- 198.9/317.3 MB 4.1 MB/s eta 0:00:29
----------------------- -------------- 199.2/317.3 MB 4.2 MB/s eta 0:00:29
----------------------- -------------- 199.4/317.3 MB 4.2 MB/s eta 0:00:29
----------------------- -------------- 199.6/317.3 MB 4.2 MB/s eta 0:00:29
----------------------- -------------- 199.9/317.3 MB 4.2 MB/s eta 0:00:28
----------------------- -------------- 200.2/317.3 MB 4.2 MB/s eta 0:00:28
------------------------ ------------- 200.4/317.3 MB 4.2 MB/s eta 0:00:29
------------------------ ------------- 200.7/317.3 MB 4.2 MB/s eta 0:00:28
------------------------ ------------- 201.0/317.3 MB 4.3 MB/s eta 0:00:28
------------------------ ------------- 201.2/317.3 MB 4.2 MB/s eta 0:00:28
------------------------ ------------- 201.5/317.3 MB 4.3 MB/s eta 0:00:28
------------------------ ------------- 201.8/317.3 MB 4.3 MB/s eta 0:00:27
------------------------ ------------- 202.1/317.3 MB 4.3 MB/s eta 0:00:27
------------------------ ------------- 202.5/317.3 MB 4.4 MB/s eta 0:00:27
------------------------ ------------- 202.6/317.3 MB 4.3 MB/s eta 0:00:27
------------------------ ------------- 202.8/317.3 MB 4.4 MB/s eta 0:00:27
------------------------ ------------- 203.1/317.3 MB 4.5 MB/s eta 0:00:26
------------------------ ------------- 203.4/317.3 MB 4.5 MB/s eta 0:00:26
------------------------ ------------- 203.7/317.3 MB 4.4 MB/s eta 0:00:26
------------------------ ------------- 204.0/317.3 MB 4.5 MB/s eta 0:00:25
------------------------ ------------- 204.2/317.3 MB 4.5 MB/s eta 0:00:25
------------------------ ------------- 204.5/317.3 MB 4.6 MB/s eta 0:00:25
------------------------ ------------- 204.8/317.3 MB 4.6 MB/s eta 0:00:25
------------------------ ------------- 205.0/317.3 MB 4.5 MB/s eta 0:00:25
------------------------ ------------- 205.1/317.3 MB 4.5 MB/s eta 0:00:25
------------------------ ------------- 205.2/317.3 MB 4.5 MB/s eta 0:00:26
------------------------ ------------- 205.4/317.3 MB 4.5 MB/s eta 0:00:26
------------------------ ------------- 205.4/317.3 MB 4.4 MB/s eta 0:00:26
------------------------ ------------- 205.6/317.3 MB 4.3 MB/s eta 0:00:26
------------------------ ------------- 205.8/317.3 MB 4.3 MB/s eta 0:00:26
------------------------ ------------- 206.0/317.3 MB 4.4 MB/s eta 0:00:26
------------------------ ------------- 206.3/317.3 MB 4.4 MB/s eta 0:00:26
------------------------ ------------- 206.5/317.3 MB 4.4 MB/s eta 0:00:26
------------------------ ------------- 207.1/317.3 MB 4.5 MB/s eta 0:00:25
------------------------ ------------- 207.3/317.3 MB 4.6 MB/s eta 0:00:25
------------------------ ------------- 207.6/317.3 MB 4.6 MB/s eta 0:00:24
------------------------ ------------- 207.9/317.3 MB 4.9 MB/s eta 0:00:23
------------------------ ------------- 208.2/317.3 MB 5.0 MB/s eta 0:00:22
------------------------ ------------- 208.5/317.3 MB 5.2 MB/s eta 0:00:21
------------------------- ------------ 208.9/317.3 MB 5.2 MB/s eta 0:00:21
------------------------- ------------ 209.2/317.3 MB 5.2 MB/s eta 0:00:21
------------------------- ------------ 209.5/317.3 MB 5.3 MB/s eta 0:00:21
------------------------- ------------ 209.8/317.3 MB 5.3 MB/s eta 0:00:21
------------------------- ------------ 210.1/317.3 MB 5.4 MB/s eta 0:00:20
------------------------- ------------ 210.4/317.3 MB 5.4 MB/s eta 0:00:20
------------------------- ------------ 210.7/317.3 MB 5.4 MB/s eta 0:00:20
------------------------- ------------ 211.0/317.3 MB 5.5 MB/s eta 0:00:20
------------------------- ------------ 211.4/317.3 MB 5.5 MB/s eta 0:00:20
------------------------- ------------ 211.7/317.3 MB 5.5 MB/s eta 0:00:20
------------------------- ------------ 211.9/317.3 MB 5.5 MB/s eta 0:00:20
------------------------- ------------ 212.1/317.3 MB 5.4 MB/s eta 0:00:20
------------------------- ------------ 212.4/317.3 MB 5.4 MB/s eta 0:00:20
------------------------- ------------ 212.8/317.3 MB 5.5 MB/s eta 0:00:20
------------------------- ------------ 212.8/317.3 MB 5.5 MB/s eta 0:00:20
------------------------- ------------ 213.0/317.3 MB 5.4 MB/s eta 0:00:20
------------------------- ------------ 213.2/317.3 MB 5.3 MB/s eta 0:00:20
------------------------- ------------ 213.4/317.3 MB 5.3 MB/s eta 0:00:20
------------------------- ------------ 213.5/317.3 MB 5.2 MB/s eta 0:00:20
------------------------- ------------ 213.7/317.3 MB 5.2 MB/s eta 0:00:21
------------------------- ------------ 214.0/317.3 MB 5.1 MB/s eta 0:00:21
------------------------- ------------ 214.1/317.3 MB 5.1 MB/s eta 0:00:21
------------------------- ------------ 214.5/317.3 MB 5.2 MB/s eta 0:00:20
------------------------- ------------ 214.8/317.3 MB 5.1 MB/s eta 0:00:21
------------------------- ------------ 215.1/317.3 MB 5.2 MB/s eta 0:00:20
------------------------- ------------ 215.4/317.3 MB 5.2 MB/s eta 0:00:20
------------------------- ------------ 215.5/317.3 MB 5.3 MB/s eta 0:00:20
------------------------- ------------ 215.8/317.3 MB 5.5 MB/s eta 0:00:19
------------------------- ------------ 215.9/317.3 MB 5.5 MB/s eta 0:00:19
------------------------- ------------ 216.0/317.3 MB 5.4 MB/s eta 0:00:19
------------------------- ------------ 216.2/317.3 MB 5.4 MB/s eta 0:00:19
------------------------- ------------ 216.4/317.3 MB 5.3 MB/s eta 0:00:19
------------------------- ------------ 216.8/317.3 MB 5.5 MB/s eta 0:00:19
-------------------------- ----------- 217.1/317.3 MB 5.4 MB/s eta 0:00:19
-------------------------- ----------- 217.5/317.3 MB 5.5 MB/s eta 0:00:19
-------------------------- ----------- 217.8/317.3 MB 5.5 MB/s eta 0:00:19
-------------------------- ----------- 217.9/317.3 MB 5.4 MB/s eta 0:00:19
-------------------------- ----------- 218.1/317.3 MB 5.4 MB/s eta 0:00:19
-------------------------- ----------- 218.2/317.3 MB 5.2 MB/s eta 0:00:19
-------------------------- ----------- 218.3/317.3 MB 5.2 MB/s eta 0:00:20
-------------------------- ----------- 218.5/317.3 MB 5.2 MB/s eta 0:00:20
-------------------------- ----------- 218.6/317.3 MB 5.0 MB/s eta 0:00:20
-------------------------- ----------- 218.7/317.3 MB 5.0 MB/s eta 0:00:20
-------------------------- ----------- 218.8/317.3 MB 5.0 MB/s eta 0:00:20
-------------------------- ----------- 218.8/317.3 MB 5.0 MB/s eta 0:00:20
-------------------------- ----------- 218.8/317.3 MB 5.0 MB/s eta 0:00:20
-------------------------- ----------- 218.8/317.3 MB 5.0 MB/s eta 0:00:20
-------------------------- ----------- 218.8/317.3 MB 5.0 MB/s eta 0:00:20
-------------------------- ----------- 218.8/317.3 MB 5.0 MB/s eta 0:00:20
-------------------------- ----------- 218.8/317.3 MB 5.0 MB/s eta 0:00:20
-------------------------- ----------- 219.9/317.3 MB 4.5 MB/s eta 0:00:22
-------------------------- ----------- 220.2/317.3 MB 4.5 MB/s eta 0:00:22
-------------------------- ----------- 220.5/317.3 MB 4.5 MB/s eta 0:00:22
-------------------------- ----------- 220.7/317.3 MB 4.5 MB/s eta 0:00:22
-------------------------- ----------- 220.8/317.3 MB 4.5 MB/s eta 0:00:22
-------------------------- ----------- 220.8/317.3 MB 4.5 MB/s eta 0:00:22
-------------------------- ----------- 220.9/317.3 MB 4.3 MB/s eta 0:00:23
-------------------------- ----------- 221.0/317.3 MB 4.3 MB/s eta 0:00:23
-------------------------- ----------- 221.2/317.3 MB 4.2 MB/s eta 0:00:24
-------------------------- ----------- 221.3/317.3 MB 4.1 MB/s eta 0:00:24
-------------------------- ----------- 221.3/317.3 MB 4.1 MB/s eta 0:00:24
-------------------------- ----------- 221.3/317.3 MB 4.1 MB/s eta 0:00:24
-------------------------- ----------- 221.3/317.3 MB 4.1 MB/s eta 0:00:24
-------------------------- ----------- 221.3/317.3 MB 4.1 MB/s eta 0:00:24
-------------------------- ----------- 221.3/317.3 MB 4.1 MB/s eta 0:00:24
-------------------------- ----------- 221.3/317.3 MB 4.1 MB/s eta 0:00:24
-------------------------- ----------- 221.3/317.3 MB 4.1 MB/s eta 0:00:24
-------------------------- ----------- 221.3/317.3 MB 4.1 MB/s eta 0:00:24
-------------------------- ----------- 222.4/317.3 MB 3.8 MB/s eta 0:00:26
-------------------------- ----------- 222.6/317.3 MB 3.8 MB/s eta 0:00:26
-------------------------- ----------- 222.7/317.3 MB 3.7 MB/s eta 0:00:26
-------------------------- ----------- 222.8/317.3 MB 3.7 MB/s eta 0:00:26
-------------------------- ----------- 222.8/317.3 MB 3.7 MB/s eta 0:00:26
-------------------------- ----------- 222.9/317.3 MB 3.6 MB/s eta 0:00:27
-------------------------- ----------- 223.0/317.3 MB 3.6 MB/s eta 0:00:27
-------------------------- ----------- 223.1/317.3 MB 3.6 MB/s eta 0:00:27
-------------------------- ----------- 223.3/317.3 MB 3.5 MB/s eta 0:00:27
-------------------------- ----------- 223.4/317.3 MB 3.5 MB/s eta 0:00:27
-------------------------- ----------- 223.5/317.3 MB 3.5 MB/s eta 0:00:27
-------------------------- ----------- 223.5/317.3 MB 3.4 MB/s eta 0:00:28
-------------------------- ----------- 223.7/317.3 MB 3.4 MB/s eta 0:00:28
-------------------------- ----------- 223.8/317.3 MB 3.4 MB/s eta 0:00:28
-------------------------- ----------- 223.8/317.3 MB 3.4 MB/s eta 0:00:28
-------------------------- ----------- 223.9/317.3 MB 3.3 MB/s eta 0:00:28
-------------------------- ----------- 223.9/317.3 MB 3.3 MB/s eta 0:00:28
-------------------------- ----------- 223.9/317.3 MB 3.3 MB/s eta 0:00:28
-------------------------- ----------- 223.9/317.3 MB 3.3 MB/s eta 0:00:28
-------------------------- ----------- 223.9/317.3 MB 3.3 MB/s eta 0:00:28
-------------------------- ----------- 223.9/317.3 MB 3.3 MB/s eta 0:00:28
-------------------------- ----------- 224.6/317.3 MB 3.2 MB/s eta 0:00:30
-------------------------- ----------- 224.8/317.3 MB 3.1 MB/s eta 0:00:30
-------------------------- ----------- 224.9/317.3 MB 3.1 MB/s eta 0:00:30
-------------------------- ----------- 225.0/317.3 MB 3.1 MB/s eta 0:00:31
-------------------------- ----------- 225.1/317.3 MB 3.1 MB/s eta 0:00:31
-------------------------- ----------- 225.2/317.3 MB 3.0 MB/s eta 0:00:31
-------------------------- ----------- 225.3/317.3 MB 3.0 MB/s eta 0:00:31
--------------------------- ---------- 225.5/317.3 MB 3.0 MB/s eta 0:00:31
--------------------------- ---------- 225.7/317.3 MB 2.9 MB/s eta 0:00:32
--------------------------- ---------- 225.9/317.3 MB 3.0 MB/s eta 0:00:31
--------------------------- ---------- 226.1/317.3 MB 3.0 MB/s eta 0:00:31
--------------------------- ---------- 226.2/317.3 MB 3.0 MB/s eta 0:00:31
--------------------------- ---------- 226.2/317.3 MB 3.0 MB/s eta 0:00:31
--------------------------- ---------- 226.4/317.3 MB 2.9 MB/s eta 0:00:31
--------------------------- ---------- 226.6/317.3 MB 2.9 MB/s eta 0:00:31
--------------------------- ---------- 226.7/317.3 MB 2.9 MB/s eta 0:00:32
--------------------------- ---------- 226.7/317.3 MB 2.9 MB/s eta 0:00:32
--------------------------- ---------- 226.9/317.3 MB 2.9 MB/s eta 0:00:32
--------------------------- ---------- 227.1/317.3 MB 2.8 MB/s eta 0:00:32
--------------------------- ---------- 227.2/317.3 MB 2.8 MB/s eta 0:00:33
--------------------------- ---------- 227.3/317.3 MB 2.8 MB/s eta 0:00:33
--------------------------- ---------- 227.5/317.3 MB 2.8 MB/s eta 0:00:33
--------------------------- ---------- 227.7/317.3 MB 2.8 MB/s eta 0:00:33
--------------------------- ---------- 227.9/317.3 MB 2.8 MB/s eta 0:00:33
--------------------------- ---------- 228.1/317.3 MB 2.7 MB/s eta 0:00:33
--------------------------- ---------- 228.3/317.3 MB 2.7 MB/s eta 0:00:33
--------------------------- ---------- 228.5/317.3 MB 2.8 MB/s eta 0:00:32
--------------------------- ---------- 228.7/317.3 MB 2.8 MB/s eta 0:00:32
--------------------------- ---------- 228.9/317.3 MB 2.8 MB/s eta 0:00:32
--------------------------- ---------- 229.2/317.3 MB 3.1 MB/s eta 0:00:29
--------------------------- ---------- 229.3/317.3 MB 3.1 MB/s eta 0:00:29
--------------------------- ---------- 229.4/317.3 MB 3.0 MB/s eta 0:00:30
--------------------------- ---------- 229.6/317.3 MB 3.0 MB/s eta 0:00:30
--------------------------- ---------- 229.7/317.3 MB 2.9 MB/s eta 0:00:30
--------------------------- ---------- 229.9/317.3 MB 2.9 MB/s eta 0:00:31
--------------------------- ---------- 230.2/317.3 MB 2.9 MB/s eta 0:00:31
--------------------------- ---------- 230.4/317.3 MB 2.9 MB/s eta 0:00:31
--------------------------- ---------- 230.6/317.3 MB 2.8 MB/s eta 0:00:31
--------------------------- ---------- 230.9/317.3 MB 2.9 MB/s eta 0:00:31
--------------------------- ---------- 231.1/317.3 MB 2.9 MB/s eta 0:00:30
--------------------------- ---------- 231.3/317.3 MB 2.9 MB/s eta 0:00:30
--------------------------- ---------- 231.6/317.3 MB 3.0 MB/s eta 0:00:29
--------------------------- ---------- 231.9/317.3 MB 3.3 MB/s eta 0:00:26
--------------------------- ---------- 232.1/317.3 MB 3.3 MB/s eta 0:00:27
--------------------------- ---------- 232.3/317.3 MB 3.2 MB/s eta 0:00:27
--------------------------- ---------- 232.5/317.3 MB 3.2 MB/s eta 0:00:27
--------------------------- ---------- 232.8/317.3 MB 3.2 MB/s eta 0:00:27
--------------------------- ---------- 233.0/317.3 MB 3.2 MB/s eta 0:00:27
--------------------------- ---------- 233.1/317.3 MB 3.3 MB/s eta 0:00:26
--------------------------- ---------- 233.2/317.3 MB 3.3 MB/s eta 0:00:26
--------------------------- ---------- 233.4/317.3 MB 3.3 MB/s eta 0:00:26
--------------------------- ---------- 233.6/317.3 MB 3.3 MB/s eta 0:00:26
--------------------------- ---------- 233.7/317.3 MB 3.4 MB/s eta 0:00:25
---------------------------- --------- 233.9/317.3 MB 3.4 MB/s eta 0:00:25
---------------------------- --------- 234.2/317.3 MB 3.9 MB/s eta 0:00:22
---------------------------- --------- 234.4/317.3 MB 3.9 MB/s eta 0:00:22
---------------------------- --------- 234.6/317.3 MB 3.8 MB/s eta 0:00:22
---------------------------- --------- 234.8/317.3 MB 3.7 MB/s eta 0:00:23
---------------------------- --------- 235.0/317.3 MB 3.7 MB/s eta 0:00:23
---------------------------- --------- 235.2/317.3 MB 3.8 MB/s eta 0:00:22
---------------------------- --------- 235.4/317.3 MB 3.9 MB/s eta 0:00:22
---------------------------- --------- 235.5/317.3 MB 3.9 MB/s eta 0:00:22
---------------------------- --------- 235.7/317.3 MB 3.9 MB/s eta 0:00:21
---------------------------- --------- 235.9/317.3 MB 3.9 MB/s eta 0:00:21
---------------------------- --------- 236.1/317.3 MB 3.9 MB/s eta 0:00:21
---------------------------- --------- 236.3/317.3 MB 3.9 MB/s eta 0:00:21
---------------------------- --------- 236.4/317.3 MB 3.9 MB/s eta 0:00:21
---------------------------- --------- 236.6/317.3 MB 4.0 MB/s eta 0:00:21
---------------------------- --------- 236.8/317.3 MB 3.9 MB/s eta 0:00:21
---------------------------- --------- 236.9/317.3 MB 4.0 MB/s eta 0:00:21
---------------------------- --------- 237.1/317.3 MB 4.1 MB/s eta 0:00:20
---------------------------- --------- 237.3/317.3 MB 4.0 MB/s eta 0:00:20
---------------------------- --------- 237.5/317.3 MB 4.1 MB/s eta 0:00:20
---------------------------- --------- 237.8/317.3 MB 4.1 MB/s eta 0:00:20
---------------------------- --------- 238.1/317.3 MB 4.2 MB/s eta 0:00:19
---------------------------- --------- 238.3/317.3 MB 4.2 MB/s eta 0:00:19
---------------------------- --------- 238.6/317.3 MB 4.2 MB/s eta 0:00:19
---------------------------- --------- 238.8/317.3 MB 4.2 MB/s eta 0:00:19
---------------------------- --------- 239.0/317.3 MB 4.2 MB/s eta 0:00:19
---------------------------- --------- 239.2/317.3 MB 4.2 MB/s eta 0:00:19
---------------------------- --------- 239.5/317.3 MB 4.2 MB/s eta 0:00:19
---------------------------- --------- 239.6/317.3 MB 4.2 MB/s eta 0:00:19
---------------------------- --------- 239.8/317.3 MB 4.2 MB/s eta 0:00:19
---------------------------- --------- 240.0/317.3 MB 4.2 MB/s eta 0:00:19
---------------------------- --------- 240.2/317.3 MB 4.3 MB/s eta 0:00:18
---------------------------- --------- 240.4/317.3 MB 4.2 MB/s eta 0:00:19
---------------------------- --------- 240.7/317.3 MB 4.3 MB/s eta 0:00:18
---------------------------- --------- 240.8/317.3 MB 4.2 MB/s eta 0:00:19
---------------------------- --------- 241.0/317.3 MB 4.2 MB/s eta 0:00:19
---------------------------- --------- 241.1/317.3 MB 4.2 MB/s eta 0:00:19
---------------------------- --------- 241.3/317.3 MB 4.1 MB/s eta 0:00:19
---------------------------- --------- 241.4/317.3 MB 4.1 MB/s eta 0:00:19
---------------------------- --------- 241.7/317.3 MB 4.1 MB/s eta 0:00:19
---------------------------- --------- 241.9/317.3 MB 4.1 MB/s eta 0:00:19
---------------------------- --------- 242.1/317.3 MB 4.1 MB/s eta 0:00:19
----------------------------- -------- 242.4/317.3 MB 4.1 MB/s eta 0:00:19
----------------------------- -------- 242.5/317.3 MB 4.1 MB/s eta 0:00:19
----------------------------- -------- 242.6/317.3 MB 4.0 MB/s eta 0:00:19
----------------------------- -------- 242.8/317.3 MB 4.0 MB/s eta 0:00:19
----------------------------- -------- 243.0/317.3 MB 4.0 MB/s eta 0:00:19
----------------------------- -------- 243.1/317.3 MB 4.0 MB/s eta 0:00:19
----------------------------- -------- 243.2/317.3 MB 3.9 MB/s eta 0:00:19
----------------------------- -------- 243.2/317.3 MB 3.9 MB/s eta 0:00:20
----------------------------- -------- 243.4/317.3 MB 3.9 MB/s eta 0:00:19
----------------------------- -------- 243.6/317.3 MB 3.9 MB/s eta 0:00:19
----------------------------- -------- 243.6/317.3 MB 3.9 MB/s eta 0:00:19
----------------------------- -------- 243.6/317.3 MB 3.9 MB/s eta 0:00:19
----------------------------- -------- 243.7/317.3 MB 3.7 MB/s eta 0:00:20
----------------------------- -------- 243.8/317.3 MB 3.7 MB/s eta 0:00:20
----------------------------- -------- 243.8/317.3 MB 3.7 MB/s eta 0:00:20
----------------------------- -------- 243.8/317.3 MB 3.7 MB/s eta 0:00:20
----------------------------- -------- 243.8/317.3 MB 3.7 MB/s eta 0:00:20
----------------------------- -------- 244.5/317.3 MB 3.7 MB/s eta 0:00:20
----------------------------- -------- 244.7/317.3 MB 3.7 MB/s eta 0:00:20
----------------------------- -------- 244.7/317.3 MB 3.6 MB/s eta 0:00:21
----------------------------- -------- 244.9/317.3 MB 3.6 MB/s eta 0:00:21
----------------------------- -------- 245.0/317.3 MB 3.6 MB/s eta 0:00:21
----------------------------- -------- 245.2/317.3 MB 3.6 MB/s eta 0:00:21
----------------------------- -------- 245.3/317.3 MB 3.6 MB/s eta 0:00:21
----------------------------- -------- 245.5/317.3 MB 3.6 MB/s eta 0:00:21
----------------------------- -------- 245.6/317.3 MB 3.6 MB/s eta 0:00:21
----------------------------- -------- 245.8/317.3 MB 3.6 MB/s eta 0:00:20
----------------------------- -------- 246.0/317.3 MB 3.6 MB/s eta 0:00:21
----------------------------- -------- 246.2/317.3 MB 3.6 MB/s eta 0:00:20
----------------------------- -------- 246.3/317.3 MB 3.5 MB/s eta 0:00:21
----------------------------- -------- 246.4/317.3 MB 3.5 MB/s eta 0:00:21
----------------------------- -------- 246.5/317.3 MB 3.5 MB/s eta 0:00:21
----------------------------- -------- 246.7/317.3 MB 3.5 MB/s eta 0:00:21
----------------------------- -------- 246.8/317.3 MB 3.5 MB/s eta 0:00:21
----------------------------- -------- 247.0/317.3 MB 3.5 MB/s eta 0:00:21
----------------------------- -------- 247.1/317.3 MB 3.5 MB/s eta 0:00:21
----------------------------- -------- 247.3/317.3 MB 3.5 MB/s eta 0:00:21
----------------------------- -------- 247.5/317.3 MB 3.5 MB/s eta 0:00:21
----------------------------- -------- 247.7/317.3 MB 3.5 MB/s eta 0:00:21
----------------------------- -------- 247.9/317.3 MB 3.5 MB/s eta 0:00:20
----------------------------- -------- 248.0/317.3 MB 3.4 MB/s eta 0:00:21
----------------------------- -------- 248.2/317.3 MB 3.4 MB/s eta 0:00:21
----------------------------- -------- 248.4/317.3 MB 3.4 MB/s eta 0:00:21
----------------------------- -------- 248.5/317.3 MB 3.4 MB/s eta 0:00:21
----------------------------- -------- 248.6/317.3 MB 3.4 MB/s eta 0:00:21
----------------------------- -------- 248.7/317.3 MB 3.4 MB/s eta 0:00:21
----------------------------- -------- 248.9/317.3 MB 3.3 MB/s eta 0:00:21
----------------------------- -------- 248.9/317.3 MB 3.3 MB/s eta 0:00:21
----------------------------- -------- 248.9/317.3 MB 3.3 MB/s eta 0:00:21
----------------------------- -------- 249.0/317.3 MB 3.2 MB/s eta 0:00:22
----------------------------- -------- 249.1/317.3 MB 3.2 MB/s eta 0:00:22
----------------------------- -------- 249.2/317.3 MB 3.2 MB/s eta 0:00:22
----------------------------- -------- 249.4/317.3 MB 3.2 MB/s eta 0:00:22
----------------------------- -------- 249.5/317.3 MB 3.2 MB/s eta 0:00:22
----------------------------- -------- 249.7/317.3 MB 3.1 MB/s eta 0:00:22
----------------------------- -------- 249.7/317.3 MB 3.1 MB/s eta 0:00:22
----------------------------- -------- 249.8/317.3 MB 3.1 MB/s eta 0:00:22
----------------------------- -------- 249.9/317.3 MB 3.1 MB/s eta 0:00:22
----------------------------- -------- 250.1/317.3 MB 3.1 MB/s eta 0:00:22
----------------------------- -------- 250.1/317.3 MB 3.1 MB/s eta 0:00:22
----------------------------- -------- 250.2/317.3 MB 3.0 MB/s eta 0:00:23
----------------------------- -------- 250.3/317.3 MB 3.0 MB/s eta 0:00:23
----------------------------- -------- 250.5/317.3 MB 3.0 MB/s eta 0:00:23
------------------------------ ------- 250.6/317.3 MB 3.0 MB/s eta 0:00:23
------------------------------ ------- 250.8/317.3 MB 3.0 MB/s eta 0:00:23
------------------------------ ------- 250.9/317.3 MB 2.9 MB/s eta 0:00:23
------------------------------ ------- 251.0/317.3 MB 2.9 MB/s eta 0:00:23
------------------------------ ------- 251.1/317.3 MB 2.9 MB/s eta 0:00:23
------------------------------ ------- 251.2/317.3 MB 2.9 MB/s eta 0:00:23
------------------------------ ------- 251.3/317.3 MB 2.9 MB/s eta 0:00:23
------------------------------ ------- 251.5/317.3 MB 2.9 MB/s eta 0:00:23
------------------------------ ------- 251.5/317.3 MB 2.9 MB/s eta 0:00:23
------------------------------ ------- 251.8/317.3 MB 2.9 MB/s eta 0:00:23
------------------------------ ------- 252.0/317.3 MB 2.9 MB/s eta 0:00:23
------------------------------ ------- 252.0/317.3 MB 2.8 MB/s eta 0:00:24
------------------------------ ------- 252.2/317.3 MB 2.8 MB/s eta 0:00:23
------------------------------ ------- 252.4/317.3 MB 2.8 MB/s eta 0:00:23
------------------------------ ------- 252.4/317.3 MB 2.8 MB/s eta 0:00:24
------------------------------ ------- 252.7/317.3 MB 2.8 MB/s eta 0:00:23
------------------------------ ------- 252.8/317.3 MB 2.8 MB/s eta 0:00:23
------------------------------ ------- 252.9/317.3 MB 2.8 MB/s eta 0:00:24
------------------------------ ------- 253.0/317.3 MB 2.8 MB/s eta 0:00:24
------------------------------ ------- 253.1/317.3 MB 2.8 MB/s eta 0:00:24
------------------------------ ------- 253.2/317.3 MB 2.7 MB/s eta 0:00:24
------------------------------ ------- 253.3/317.3 MB 2.7 MB/s eta 0:00:24
------------------------------ ------- 253.5/317.3 MB 2.8 MB/s eta 0:00:24
------------------------------ ------- 253.5/317.3 MB 2.8 MB/s eta 0:00:24
------------------------------ ------- 253.7/317.3 MB 2.8 MB/s eta 0:00:24
------------------------------ ------- 253.9/317.3 MB 2.9 MB/s eta 0:00:23
------------------------------ ------- 253.9/317.3 MB 2.9 MB/s eta 0:00:23
------------------------------ ------- 254.0/317.3 MB 3.0 MB/s eta 0:00:22
------------------------------ ------- 254.2/317.3 MB 2.9 MB/s eta 0:00:22
------------------------------ ------- 254.3/317.3 MB 2.9 MB/s eta 0:00:22
------------------------------ ------- 254.5/317.3 MB 2.9 MB/s eta 0:00:22
------------------------------ ------- 254.7/317.3 MB 2.8 MB/s eta 0:00:23
------------------------------ ------- 254.8/317.3 MB 2.8 MB/s eta 0:00:23
------------------------------ ------- 254.9/317.3 MB 2.8 MB/s eta 0:00:23
------------------------------ ------- 255.3/317.3 MB 2.9 MB/s eta 0:00:22
------------------------------ ------- 255.3/317.3 MB 2.9 MB/s eta 0:00:22
------------------------------ ------- 255.3/317.3 MB 2.8 MB/s eta 0:00:22
------------------------------ ------- 255.6/317.3 MB 2.8 MB/s eta 0:00:22
------------------------------ ------- 255.8/317.3 MB 2.8 MB/s eta 0:00:22
------------------------------ ------- 255.9/317.3 MB 2.8 MB/s eta 0:00:22
------------------------------ ------- 256.1/317.3 MB 2.8 MB/s eta 0:00:22
------------------------------ ------- 256.2/317.3 MB 2.8 MB/s eta 0:00:22
------------------------------ ------- 256.2/317.3 MB 2.8 MB/s eta 0:00:22
------------------------------ ------- 256.2/317.3 MB 2.8 MB/s eta 0:00:22
------------------------------ ------- 256.3/317.3 MB 2.7 MB/s eta 0:00:23
------------------------------ ------- 256.3/317.3 MB 2.7 MB/s eta 0:00:23
------------------------------ ------- 256.5/317.3 MB 2.7 MB/s eta 0:00:23
------------------------------ ------- 256.7/317.3 MB 2.7 MB/s eta 0:00:23
------------------------------ ------- 256.8/317.3 MB 2.7 MB/s eta 0:00:23
------------------------------ ------- 256.9/317.3 MB 2.7 MB/s eta 0:00:23
------------------------------ ------- 257.0/317.3 MB 2.7 MB/s eta 0:00:23
------------------------------ ------- 257.1/317.3 MB 2.7 MB/s eta 0:00:23
------------------------------ ------- 257.2/317.3 MB 2.7 MB/s eta 0:00:23
------------------------------ ------- 257.4/317.3 MB 2.7 MB/s eta 0:00:23
------------------------------ ------- 257.5/317.3 MB 2.7 MB/s eta 0:00:23
------------------------------ ------- 257.6/317.3 MB 2.7 MB/s eta 0:00:23
------------------------------ ------- 257.8/317.3 MB 2.7 MB/s eta 0:00:23
------------------------------ ------- 257.9/317.3 MB 2.6 MB/s eta 0:00:23
------------------------------ ------- 258.1/317.3 MB 2.6 MB/s eta 0:00:23
------------------------------ ------- 258.3/317.3 MB 2.7 MB/s eta 0:00:23
------------------------------ ------- 258.4/317.3 MB 2.6 MB/s eta 0:00:23
------------------------------ ------- 258.5/317.3 MB 2.6 MB/s eta 0:00:23
------------------------------ ------- 258.6/317.3 MB 2.6 MB/s eta 0:00:23
------------------------------ ------- 258.8/317.3 MB 2.6 MB/s eta 0:00:23
------------------------------- ------ 259.0/317.3 MB 2.7 MB/s eta 0:00:22
------------------------------- ------ 259.2/317.3 MB 2.7 MB/s eta 0:00:22
------------------------------- ------ 259.5/317.3 MB 2.8 MB/s eta 0:00:21
------------------------------- ------ 259.7/317.3 MB 2.8 MB/s eta 0:00:21
------------------------------- ------ 259.9/317.3 MB 2.8 MB/s eta 0:00:21
------------------------------- ------ 259.9/317.3 MB 2.8 MB/s eta 0:00:21
------------------------------- ------ 260.0/317.3 MB 2.8 MB/s eta 0:00:21
------------------------------- ------ 260.2/317.3 MB 2.8 MB/s eta 0:00:21
------------------------------- ------ 260.4/317.3 MB 2.8 MB/s eta 0:00:21
------------------------------- ------ 260.6/317.3 MB 2.9 MB/s eta 0:00:20
------------------------------- ------ 260.7/317.3 MB 2.8 MB/s eta 0:00:20
------------------------------- ------ 260.8/317.3 MB 2.8 MB/s eta 0:00:20
------------------------------- ------ 261.0/317.3 MB 2.8 MB/s eta 0:00:20
------------------------------- ------ 261.3/317.3 MB 2.9 MB/s eta 0:00:20
------------------------------- ------ 261.4/317.3 MB 2.9 MB/s eta 0:00:20
------------------------------- ------ 261.5/317.3 MB 2.9 MB/s eta 0:00:20
------------------------------- ------ 261.7/317.3 MB 2.9 MB/s eta 0:00:19
------------------------------- ------ 261.9/317.3 MB 2.9 MB/s eta 0:00:19
------------------------------- ------ 262.2/317.3 MB 3.0 MB/s eta 0:00:19
------------------------------- ------ 262.3/317.3 MB 3.0 MB/s eta 0:00:19
------------------------------- ------ 262.3/317.3 MB 3.0 MB/s eta 0:00:19
------------------------------- ------ 262.5/317.3 MB 2.9 MB/s eta 0:00:19
------------------------------- ------ 262.6/317.3 MB 2.9 MB/s eta 0:00:19
------------------------------- ------ 262.8/317.3 MB 3.0 MB/s eta 0:00:19
------------------------------- ------ 263.0/317.3 MB 3.0 MB/s eta 0:00:19
------------------------------- ------ 263.2/317.3 MB 3.0 MB/s eta 0:00:19
------------------------------- ------ 263.4/317.3 MB 3.0 MB/s eta 0:00:18
------------------------------- ------ 263.5/317.3 MB 3.0 MB/s eta 0:00:18
------------------------------- ------ 263.7/317.3 MB 3.0 MB/s eta 0:00:18
------------------------------- ------ 263.8/317.3 MB 3.1 MB/s eta 0:00:18
------------------------------- ------ 264.0/317.3 MB 3.1 MB/s eta 0:00:18
------------------------------- ------ 264.2/317.3 MB 3.1 MB/s eta 0:00:17
------------------------------- ------ 264.5/317.3 MB 3.2 MB/s eta 0:00:17
------------------------------- ------ 264.7/317.3 MB 3.2 MB/s eta 0:00:17
------------------------------- ------ 264.9/317.3 MB 3.2 MB/s eta 0:00:17
------------------------------- ------ 265.1/317.3 MB 3.2 MB/s eta 0:00:17
------------------------------- ------ 265.2/317.3 MB 3.2 MB/s eta 0:00:17
------------------------------- ------ 265.2/317.3 MB 3.2 MB/s eta 0:00:17
------------------------------- ------ 265.3/317.3 MB 3.1 MB/s eta 0:00:17
------------------------------- ------ 265.3/317.3 MB 3.1 MB/s eta 0:00:17
------------------------------- ------ 265.4/317.3 MB 3.1 MB/s eta 0:00:17
------------------------------- ------ 265.6/317.3 MB 3.1 MB/s eta 0:00:17
------------------------------- ------ 265.7/317.3 MB 3.1 MB/s eta 0:00:17
------------------------------- ------ 265.9/317.3 MB 3.1 MB/s eta 0:00:17
------------------------------- ------ 266.2/317.3 MB 3.1 MB/s eta 0:00:17
------------------------------- ------ 266.4/317.3 MB 3.1 MB/s eta 0:00:17
------------------------------- ------ 266.5/317.3 MB 3.3 MB/s eta 0:00:16
------------------------------- ------ 266.5/317.3 MB 3.3 MB/s eta 0:00:16
------------------------------- ------ 266.6/317.3 MB 3.3 MB/s eta 0:00:16
------------------------------- ------ 266.7/317.3 MB 3.2 MB/s eta 0:00:16
------------------------------- ------ 266.8/317.3 MB 3.2 MB/s eta 0:00:16
------------------------------- ------ 266.8/317.3 MB 3.1 MB/s eta 0:00:17
------------------------------- ------ 266.9/317.3 MB 3.1 MB/s eta 0:00:17
------------------------------- ------ 267.0/317.3 MB 3.1 MB/s eta 0:00:17
------------------------------- ------ 267.0/317.3 MB 3.1 MB/s eta 0:00:17
------------------------------- ------ 267.0/317.3 MB 3.1 MB/s eta 0:00:17
-------------------------------- ----- 267.7/317.3 MB 3.2 MB/s eta 0:00:16
-------------------------------- ----- 267.9/317.3 MB 3.3 MB/s eta 0:00:16
-------------------------------- ----- 268.0/317.3 MB 3.3 MB/s eta 0:00:16
-------------------------------- ----- 268.3/317.3 MB 3.3 MB/s eta 0:00:15
-------------------------------- ----- 268.5/317.3 MB 3.3 MB/s eta 0:00:15
-------------------------------- ----- 268.7/317.3 MB 3.3 MB/s eta 0:00:15
-------------------------------- ----- 268.8/317.3 MB 3.3 MB/s eta 0:00:15
-------------------------------- ----- 268.8/317.3 MB 3.3 MB/s eta 0:00:15
-------------------------------- ----- 269.0/317.3 MB 3.3 MB/s eta 0:00:15
-------------------------------- ----- 269.2/317.3 MB 3.3 MB/s eta 0:00:15
-------------------------------- ----- 269.4/317.3 MB 3.3 MB/s eta 0:00:15
-------------------------------- ----- 269.6/317.3 MB 3.3 MB/s eta 0:00:15
-------------------------------- ----- 269.8/317.3 MB 3.3 MB/s eta 0:00:15
-------------------------------- ----- 270.0/317.3 MB 3.3 MB/s eta 0:00:15
-------------------------------- ----- 270.2/317.3 MB 3.3 MB/s eta 0:00:15
-------------------------------- ----- 270.5/317.3 MB 3.4 MB/s eta 0:00:14
-------------------------------- ----- 270.7/317.3 MB 3.4 MB/s eta 0:00:14
-------------------------------- ----- 270.9/317.3 MB 3.4 MB/s eta 0:00:14
-------------------------------- ----- 271.1/317.3 MB 3.4 MB/s eta 0:00:14
-------------------------------- ----- 271.3/317.3 MB 3.4 MB/s eta 0:00:14
-------------------------------- ----- 271.5/317.3 MB 3.4 MB/s eta 0:00:14
-------------------------------- ----- 271.6/317.3 MB 3.4 MB/s eta 0:00:14
-------------------------------- ----- 271.6/317.3 MB 3.4 MB/s eta 0:00:14
-------------------------------- ----- 271.9/317.3 MB 3.4 MB/s eta 0:00:14
-------------------------------- ----- 272.1/317.3 MB 3.4 MB/s eta 0:00:14
-------------------------------- ----- 272.3/317.3 MB 3.4 MB/s eta 0:00:14
-------------------------------- ----- 272.5/317.3 MB 3.4 MB/s eta 0:00:14
-------------------------------- ----- 272.8/317.3 MB 3.5 MB/s eta 0:00:13
-------------------------------- ----- 272.9/317.3 MB 3.5 MB/s eta 0:00:13
-------------------------------- ----- 273.0/317.3 MB 3.5 MB/s eta 0:00:13
-------------------------------- ----- 273.1/317.3 MB 3.4 MB/s eta 0:00:13
-------------------------------- ----- 273.2/317.3 MB 3.4 MB/s eta 0:00:13
-------------------------------- ----- 273.3/317.3 MB 3.4 MB/s eta 0:00:14
-------------------------------- ----- 273.5/317.3 MB 3.4 MB/s eta 0:00:13
-------------------------------- ----- 273.6/317.3 MB 3.4 MB/s eta 0:00:13
-------------------------------- ----- 273.7/317.3 MB 3.4 MB/s eta 0:00:13
-------------------------------- ----- 273.8/317.3 MB 3.3 MB/s eta 0:00:14
-------------------------------- ----- 273.8/317.3 MB 3.3 MB/s eta 0:00:14
-------------------------------- ----- 273.9/317.3 MB 3.3 MB/s eta 0:00:14
-------------------------------- ----- 273.9/317.3 MB 3.3 MB/s eta 0:00:14
-------------------------------- ----- 274.1/317.3 MB 3.2 MB/s eta 0:00:14
-------------------------------- ----- 274.3/317.3 MB 3.2 MB/s eta 0:00:14
-------------------------------- ----- 274.4/317.3 MB 3.2 MB/s eta 0:00:14
-------------------------------- ----- 274.4/317.3 MB 3.1 MB/s eta 0:00:14
-------------------------------- ----- 274.6/317.3 MB 3.1 MB/s eta 0:00:14
-------------------------------- ----- 274.7/317.3 MB 3.1 MB/s eta 0:00:14
-------------------------------- ----- 274.8/317.3 MB 3.1 MB/s eta 0:00:14
-------------------------------- ----- 275.0/317.3 MB 3.1 MB/s eta 0:00:14
-------------------------------- ----- 275.1/317.3 MB 3.1 MB/s eta 0:00:14
-------------------------------- ----- 275.2/317.3 MB 3.0 MB/s eta 0:00:14
-------------------------------- ----- 275.4/317.3 MB 3.1 MB/s eta 0:00:14
--------------------------------- ---- 275.7/317.3 MB 3.2 MB/s eta 0:00:14
--------------------------------- ---- 275.8/317.3 MB 3.2 MB/s eta 0:00:14
--------------------------------- ---- 276.0/317.3 MB 3.2 MB/s eta 0:00:14
--------------------------------- ---- 276.2/317.3 MB 3.2 MB/s eta 0:00:13
--------------------------------- ---- 276.2/317.3 MB 3.2 MB/s eta 0:00:13
--------------------------------- ---- 276.4/317.3 MB 3.1 MB/s eta 0:00:14
--------------------------------- ---- 276.5/317.3 MB 3.1 MB/s eta 0:00:14
--------------------------------- ---- 276.6/317.3 MB 3.1 MB/s eta 0:00:14
--------------------------------- ---- 276.6/317.3 MB 3.1 MB/s eta 0:00:14
--------------------------------- ---- 276.7/317.3 MB 3.0 MB/s eta 0:00:14
--------------------------------- ---- 276.8/317.3 MB 3.0 MB/s eta 0:00:14
--------------------------------- ---- 276.8/317.3 MB 3.0 MB/s eta 0:00:14
--------------------------------- ---- 276.8/317.3 MB 3.0 MB/s eta 0:00:14
--------------------------------- ---- 276.8/317.3 MB 3.0 MB/s eta 0:00:14
--------------------------------- ---- 276.8/317.3 MB 3.0 MB/s eta 0:00:14
--------------------------------- ---- 276.8/317.3 MB 3.0 MB/s eta 0:00:14
--------------------------------- ---- 276.8/317.3 MB 3.0 MB/s eta 0:00:14
--------------------------------- ---- 276.8/317.3 MB 3.0 MB/s eta 0:00:14
--------------------------------- ---- 276.8/317.3 MB 3.0 MB/s eta 0:00:14
--------------------------------- ---- 276.8/317.3 MB 3.0 MB/s eta 0:00:14
--------------------------------- ---- 276.8/317.3 MB 3.0 MB/s eta 0:00:14
--------------------------------- ---- 277.2/317.3 MB 2.8 MB/s eta 0:00:15
--------------------------------- ---- 277.6/317.3 MB 2.9 MB/s eta 0:00:14
--------------------------------- ---- 277.7/317.3 MB 2.9 MB/s eta 0:00:14
--------------------------------- ---- 277.7/317.3 MB 2.8 MB/s eta 0:00:15
--------------------------------- ---- 277.9/317.3 MB 2.8 MB/s eta 0:00:15
--------------------------------- ---- 278.1/317.3 MB 2.8 MB/s eta 0:00:15
--------------------------------- ---- 278.3/317.3 MB 2.8 MB/s eta 0:00:14
--------------------------------- ---- 278.4/317.3 MB 2.8 MB/s eta 0:00:15
--------------------------------- ---- 278.6/317.3 MB 2.8 MB/s eta 0:00:15
--------------------------------- ---- 278.7/317.3 MB 2.8 MB/s eta 0:00:14
--------------------------------- ---- 278.8/317.3 MB 2.8 MB/s eta 0:00:15
--------------------------------- ---- 278.9/317.3 MB 2.7 MB/s eta 0:00:15
--------------------------------- ---- 279.0/317.3 MB 2.7 MB/s eta 0:00:14
--------------------------------- ---- 279.2/317.3 MB 2.8 MB/s eta 0:00:14
--------------------------------- ---- 279.3/317.3 MB 2.8 MB/s eta 0:00:14
--------------------------------- ---- 279.4/317.3 MB 2.7 MB/s eta 0:00:14
--------------------------------- ---- 279.6/317.3 MB 2.8 MB/s eta 0:00:14
--------------------------------- ---- 279.8/317.3 MB 2.7 MB/s eta 0:00:14
--------------------------------- ---- 279.9/317.3 MB 2.7 MB/s eta 0:00:14
--------------------------------- ---- 280.1/317.3 MB 2.7 MB/s eta 0:00:14
--------------------------------- ---- 280.2/317.3 MB 2.7 MB/s eta 0:00:14
--------------------------------- ---- 280.2/317.3 MB 2.7 MB/s eta 0:00:14
--------------------------------- ---- 280.3/317.3 MB 2.7 MB/s eta 0:00:14
--------------------------------- ---- 280.5/317.3 MB 2.7 MB/s eta 0:00:14
--------------------------------- ---- 280.7/317.3 MB 2.7 MB/s eta 0:00:14
--------------------------------- ---- 280.8/317.3 MB 2.7 MB/s eta 0:00:14
--------------------------------- ---- 280.9/317.3 MB 2.6 MB/s eta 0:00:14
--------------------------------- ---- 281.0/317.3 MB 2.6 MB/s eta 0:00:14
--------------------------------- ---- 281.1/317.3 MB 2.6 MB/s eta 0:00:14
--------------------------------- ---- 281.2/317.3 MB 2.6 MB/s eta 0:00:14
--------------------------------- ---- 281.4/317.3 MB 2.6 MB/s eta 0:00:14
--------------------------------- ---- 281.5/317.3 MB 2.5 MB/s eta 0:00:15
--------------------------------- ---- 281.6/317.3 MB 2.6 MB/s eta 0:00:14
--------------------------------- ---- 281.8/317.3 MB 2.6 MB/s eta 0:00:14
--------------------------------- ---- 281.9/317.3 MB 2.6 MB/s eta 0:00:14
--------------------------------- ---- 282.1/317.3 MB 2.6 MB/s eta 0:00:14
--------------------------------- ---- 282.3/317.3 MB 2.6 MB/s eta 0:00:14
--------------------------------- ---- 282.4/317.3 MB 2.5 MB/s eta 0:00:14
--------------------------------- ---- 282.6/317.3 MB 2.6 MB/s eta 0:00:14
--------------------------------- ---- 282.7/317.3 MB 2.5 MB/s eta 0:00:14
--------------------------------- ---- 282.8/317.3 MB 2.5 MB/s eta 0:00:14
--------------------------------- ---- 282.9/317.3 MB 2.5 MB/s eta 0:00:14
--------------------------------- ---- 283.2/317.3 MB 2.5 MB/s eta 0:00:14
--------------------------------- ---- 283.3/317.3 MB 2.5 MB/s eta 0:00:14
--------------------------------- ---- 283.5/317.3 MB 2.5 MB/s eta 0:00:14
--------------------------------- ---- 283.7/317.3 MB 2.6 MB/s eta 0:00:14
--------------------------------- ---- 283.8/317.3 MB 2.6 MB/s eta 0:00:14
---------------------------------- --- 284.0/317.3 MB 2.6 MB/s eta 0:00:13
---------------------------------- --- 284.1/317.3 MB 2.6 MB/s eta 0:00:13
---------------------------------- --- 284.2/317.3 MB 2.7 MB/s eta 0:00:13
---------------------------------- --- 284.4/317.3 MB 2.7 MB/s eta 0:00:13
---------------------------------- --- 284.4/317.3 MB 2.6 MB/s eta 0:00:13
---------------------------------- --- 284.5/317.3 MB 2.6 MB/s eta 0:00:13
---------------------------------- --- 284.6/317.3 MB 2.6 MB/s eta 0:00:13
---------------------------------- --- 284.7/317.3 MB 2.6 MB/s eta 0:00:13
---------------------------------- --- 284.9/317.3 MB 2.6 MB/s eta 0:00:13
---------------------------------- --- 285.2/317.3 MB 2.7 MB/s eta 0:00:13
---------------------------------- --- 285.3/317.3 MB 2.7 MB/s eta 0:00:13
---------------------------------- --- 285.3/317.3 MB 2.6 MB/s eta 0:00:13
---------------------------------- --- 285.4/317.3 MB 2.7 MB/s eta 0:00:13
---------------------------------- --- 285.7/317.3 MB 2.7 MB/s eta 0:00:12
---------------------------------- --- 285.8/317.3 MB 2.7 MB/s eta 0:00:12
---------------------------------- --- 285.8/317.3 MB 2.7 MB/s eta 0:00:12
---------------------------------- --- 286.1/317.3 MB 2.6 MB/s eta 0:00:12
---------------------------------- --- 286.3/317.3 MB 2.7 MB/s eta 0:00:12
---------------------------------- --- 286.5/317.3 MB 2.7 MB/s eta 0:00:12
---------------------------------- --- 286.7/317.3 MB 2.7 MB/s eta 0:00:12
---------------------------------- --- 286.9/317.3 MB 2.8 MB/s eta 0:00:11
---------------------------------- --- 287.1/317.3 MB 3.2 MB/s eta 0:00:10
---------------------------------- --- 287.3/317.3 MB 3.1 MB/s eta 0:00:10
---------------------------------- --- 287.4/317.3 MB 3.2 MB/s eta 0:00:10
---------------------------------- --- 287.6/317.3 MB 3.1 MB/s eta 0:00:10
---------------------------------- --- 287.8/317.3 MB 3.1 MB/s eta 0:00:10
---------------------------------- --- 288.0/317.3 MB 3.1 MB/s eta 0:00:10
---------------------------------- --- 288.2/317.3 MB 3.2 MB/s eta 0:00:10
---------------------------------- --- 288.5/317.3 MB 3.2 MB/s eta 0:00:10
---------------------------------- --- 288.6/317.3 MB 3.2 MB/s eta 0:00:10
---------------------------------- --- 288.8/317.3 MB 3.2 MB/s eta 0:00:09
---------------------------------- --- 288.9/317.3 MB 3.2 MB/s eta 0:00:09
---------------------------------- --- 289.1/317.3 MB 3.2 MB/s eta 0:00:09
---------------------------------- --- 289.2/317.3 MB 3.2 MB/s eta 0:00:09
---------------------------------- --- 289.4/317.3 MB 3.2 MB/s eta 0:00:09
---------------------------------- --- 289.7/317.3 MB 3.3 MB/s eta 0:00:09
---------------------------------- --- 289.8/317.3 MB 3.3 MB/s eta 0:00:09
---------------------------------- --- 289.9/317.3 MB 3.3 MB/s eta 0:00:09
---------------------------------- --- 290.0/317.3 MB 3.2 MB/s eta 0:00:09
---------------------------------- --- 290.2/317.3 MB 3.2 MB/s eta 0:00:09
---------------------------------- --- 290.4/317.3 MB 3.3 MB/s eta 0:00:09
---------------------------------- --- 290.6/317.3 MB 3.3 MB/s eta 0:00:08
---------------------------------- --- 290.8/317.3 MB 3.3 MB/s eta 0:00:08
---------------------------------- --- 291.1/317.3 MB 3.4 MB/s eta 0:00:08
---------------------------------- --- 291.3/317.3 MB 3.4 MB/s eta 0:00:08
---------------------------------- --- 291.5/317.3 MB 3.5 MB/s eta 0:00:08
---------------------------------- --- 291.8/317.3 MB 3.6 MB/s eta 0:00:08
---------------------------------- --- 291.9/317.3 MB 3.5 MB/s eta 0:00:08
---------------------------------- --- 292.1/317.3 MB 3.5 MB/s eta 0:00:08
----------------------------------- -- 292.3/317.3 MB 3.6 MB/s eta 0:00:08
----------------------------------- -- 292.5/317.3 MB 3.6 MB/s eta 0:00:07
----------------------------------- -- 292.7/317.3 MB 3.6 MB/s eta 0:00:07
----------------------------------- -- 292.9/317.3 MB 3.6 MB/s eta 0:00:07
----------------------------------- -- 293.0/317.3 MB 3.6 MB/s eta 0:00:07
----------------------------------- -- 293.2/317.3 MB 3.6 MB/s eta 0:00:07
----------------------------------- -- 293.3/317.3 MB 3.6 MB/s eta 0:00:07
----------------------------------- -- 293.4/317.3 MB 3.6 MB/s eta 0:00:07
----------------------------------- -- 293.6/317.3 MB 3.6 MB/s eta 0:00:07
----------------------------------- -- 293.7/317.3 MB 3.6 MB/s eta 0:00:07
----------------------------------- -- 293.9/317.3 MB 3.6 MB/s eta 0:00:07
----------------------------------- -- 294.1/317.3 MB 3.6 MB/s eta 0:00:07
----------------------------------- -- 294.3/317.3 MB 3.6 MB/s eta 0:00:07
----------------------------------- -- 294.5/317.3 MB 3.6 MB/s eta 0:00:07
----------------------------------- -- 294.6/317.3 MB 3.6 MB/s eta 0:00:07
----------------------------------- -- 294.8/317.3 MB 3.7 MB/s eta 0:00:07
----------------------------------- -- 294.9/317.3 MB 3.7 MB/s eta 0:00:07
----------------------------------- -- 295.1/317.3 MB 3.7 MB/s eta 0:00:06
----------------------------------- -- 295.3/317.3 MB 3.7 MB/s eta 0:00:06
----------------------------------- -- 295.5/317.3 MB 3.7 MB/s eta 0:00:06
----------------------------------- -- 295.6/317.3 MB 3.8 MB/s eta 0:00:06
----------------------------------- -- 295.9/317.3 MB 3.8 MB/s eta 0:00:06
----------------------------------- -- 296.0/317.3 MB 3.7 MB/s eta 0:00:06
----------------------------------- -- 296.2/317.3 MB 3.8 MB/s eta 0:00:06
----------------------------------- -- 296.4/317.3 MB 3.8 MB/s eta 0:00:06
----------------------------------- -- 296.5/317.3 MB 3.7 MB/s eta 0:00:06
----------------------------------- -- 296.7/317.3 MB 3.7 MB/s eta 0:00:06
----------------------------------- -- 296.8/317.3 MB 3.8 MB/s eta 0:00:06
----------------------------------- -- 297.1/317.3 MB 3.7 MB/s eta 0:00:06
----------------------------------- -- 297.3/317.3 MB 3.8 MB/s eta 0:00:06
----------------------------------- -- 297.4/317.3 MB 3.7 MB/s eta 0:00:06
----------------------------------- -- 297.4/317.3 MB 3.7 MB/s eta 0:00:06
----------------------------------- -- 297.4/317.3 MB 3.7 MB/s eta 0:00:06
----------------------------------- -- 297.4/317.3 MB 3.7 MB/s eta 0:00:06
----------------------------------- -- 297.9/317.3 MB 3.7 MB/s eta 0:00:06
----------------------------------- -- 298.0/317.3 MB 3.6 MB/s eta 0:00:06
----------------------------------- -- 298.1/317.3 MB 3.6 MB/s eta 0:00:06
----------------------------------- -- 298.3/317.3 MB 3.6 MB/s eta 0:00:06
----------------------------------- -- 298.5/317.3 MB 3.6 MB/s eta 0:00:06
----------------------------------- -- 298.6/317.3 MB 3.6 MB/s eta 0:00:06
----------------------------------- -- 298.7/317.3 MB 3.5 MB/s eta 0:00:06
----------------------------------- -- 298.9/317.3 MB 3.6 MB/s eta 0:00:06
----------------------------------- -- 299.1/317.3 MB 3.6 MB/s eta 0:00:06
----------------------------------- -- 299.2/317.3 MB 3.5 MB/s eta 0:00:06
----------------------------------- -- 299.4/317.3 MB 3.5 MB/s eta 0:00:06
----------------------------------- -- 299.5/317.3 MB 3.6 MB/s eta 0:00:05
----------------------------------- -- 299.7/317.3 MB 3.5 MB/s eta 0:00:05
----------------------------------- -- 299.9/317.3 MB 3.5 MB/s eta 0:00:05
----------------------------------- -- 300.1/317.3 MB 3.5 MB/s eta 0:00:05
----------------------------------- -- 300.3/317.3 MB 3.6 MB/s eta 0:00:05
----------------------------------- -- 300.5/317.3 MB 3.6 MB/s eta 0:00:05
------------------------------------ - 300.7/317.3 MB 3.6 MB/s eta 0:00:05
------------------------------------ - 300.8/317.3 MB 3.5 MB/s eta 0:00:05
------------------------------------ - 301.1/317.3 MB 3.6 MB/s eta 0:00:05
------------------------------------ - 301.2/317.3 MB 3.5 MB/s eta 0:00:05
------------------------------------ - 301.4/317.3 MB 3.5 MB/s eta 0:00:05
------------------------------------ - 301.5/317.3 MB 3.5 MB/s eta 0:00:05
------------------------------------ - 301.6/317.3 MB 3.5 MB/s eta 0:00:05
------------------------------------ - 301.8/317.3 MB 3.5 MB/s eta 0:00:05
------------------------------------ - 301.9/317.3 MB 3.4 MB/s eta 0:00:05
------------------------------------ - 302.1/317.3 MB 3.4 MB/s eta 0:00:05
------------------------------------ - 302.2/317.3 MB 3.5 MB/s eta 0:00:05
------------------------------------ - 302.2/317.3 MB 3.5 MB/s eta 0:00:05
------------------------------------ - 302.2/317.3 MB 3.3 MB/s eta 0:00:05
------------------------------------ - 302.3/317.3 MB 3.3 MB/s eta 0:00:05
------------------------------------ - 302.5/317.3 MB 3.3 MB/s eta 0:00:05
------------------------------------ - 302.6/317.3 MB 3.2 MB/s eta 0:00:05
------------------------------------ - 302.7/317.3 MB 3.2 MB/s eta 0:00:05
------------------------------------ - 302.9/317.3 MB 3.2 MB/s eta 0:00:05
------------------------------------ - 303.1/317.3 MB 3.2 MB/s eta 0:00:05
------------------------------------ - 303.2/317.3 MB 3.2 MB/s eta 0:00:05
------------------------------------ - 303.3/317.3 MB 3.2 MB/s eta 0:00:05
------------------------------------ - 303.4/317.3 MB 3.2 MB/s eta 0:00:05
------------------------------------ - 303.6/317.3 MB 3.2 MB/s eta 0:00:05
------------------------------------ - 303.7/317.3 MB 3.2 MB/s eta 0:00:05
------------------------------------ - 303.9/317.3 MB 3.2 MB/s eta 0:00:05
------------------------------------ - 304.1/317.3 MB 3.2 MB/s eta 0:00:05
------------------------------------ - 304.3/317.3 MB 3.2 MB/s eta 0:00:05
------------------------------------ - 304.3/317.3 MB 3.2 MB/s eta 0:00:04
------------------------------------ - 304.6/317.3 MB 3.2 MB/s eta 0:00:04
------------------------------------ - 304.8/317.3 MB 3.2 MB/s eta 0:00:04
------------------------------------ - 305.0/317.3 MB 3.2 MB/s eta 0:00:04
------------------------------------ - 305.2/317.3 MB 3.3 MB/s eta 0:00:04
------------------------------------ - 305.4/317.3 MB 3.3 MB/s eta 0:00:04
------------------------------------ - 305.6/317.3 MB 3.3 MB/s eta 0:00:04
------------------------------------ - 305.7/317.3 MB 3.2 MB/s eta 0:00:04
------------------------------------ - 305.8/317.3 MB 3.2 MB/s eta 0:00:04
------------------------------------ - 306.0/317.3 MB 3.2 MB/s eta 0:00:04
------------------------------------ - 306.1/317.3 MB 3.2 MB/s eta 0:00:04
------------------------------------ - 306.3/317.3 MB 3.2 MB/s eta 0:00:04
------------------------------------ - 306.5/317.3 MB 3.2 MB/s eta 0:00:04
------------------------------------ - 306.6/317.3 MB 3.3 MB/s eta 0:00:04
------------------------------------ - 306.7/317.3 MB 3.2 MB/s eta 0:00:04
------------------------------------ - 306.8/317.3 MB 3.2 MB/s eta 0:00:04
------------------------------------ - 307.0/317.3 MB 3.2 MB/s eta 0:00:04
------------------------------------ - 307.2/317.3 MB 3.2 MB/s eta 0:00:04
------------------------------------ - 307.4/317.3 MB 3.2 MB/s eta 0:00:04
------------------------------------ - 307.6/317.3 MB 3.2 MB/s eta 0:00:04
------------------------------------ - 307.8/317.3 MB 3.4 MB/s eta 0:00:03
------------------------------------ - 307.9/317.3 MB 3.3 MB/s eta 0:00:03
------------------------------------ - 308.2/317.3 MB 3.3 MB/s eta 0:00:03
------------------------------------ - 308.3/317.3 MB 3.3 MB/s eta 0:00:03
------------------------------------ - 308.5/317.3 MB 3.3 MB/s eta 0:00:03
------------------------------------ - 308.7/317.3 MB 3.3 MB/s eta 0:00:03
------------------------------------ - 308.8/317.3 MB 3.3 MB/s eta 0:00:03
------------------------------------ - 308.9/317.3 MB 3.3 MB/s eta 0:00:03
------------------------------------- 309.2/317.3 MB 3.3 MB/s eta 0:00:03
------------------------------------- 309.3/317.3 MB 3.3 MB/s eta 0:00:03
------------------------------------- 309.5/317.3 MB 3.3 MB/s eta 0:00:03
------------------------------------- 309.8/317.3 MB 3.4 MB/s eta 0:00:03
------------------------------------- 309.9/317.3 MB 3.4 MB/s eta 0:00:03
------------------------------------- 310.2/317.3 MB 3.4 MB/s eta 0:00:03
------------------------------------- 310.4/317.3 MB 3.4 MB/s eta 0:00:03
------------------------------------- 310.7/317.3 MB 3.4 MB/s eta 0:00:02
------------------------------------- 310.8/317.3 MB 3.4 MB/s eta 0:00:02
------------------------------------- 311.0/317.3 MB 3.4 MB/s eta 0:00:02
------------------------------------- 311.2/317.3 MB 3.4 MB/s eta 0:00:02
------------------------------------- 311.4/317.3 MB 3.4 MB/s eta 0:00:02
------------------------------------- 311.6/317.3 MB 3.4 MB/s eta 0:00:02
------------------------------------- 311.9/317.3 MB 3.5 MB/s eta 0:00:02
------------------------------------- 312.1/317.3 MB 3.5 MB/s eta 0:00:02
------------------------------------- 312.3/317.3 MB 3.5 MB/s eta 0:00:02
------------------------------------- 312.5/317.3 MB 3.7 MB/s eta 0:00:02
------------------------------------- 312.5/317.3 MB 3.6 MB/s eta 0:00:02
------------------------------------- 312.7/317.3 MB 3.6 MB/s eta 0:00:02
------------------------------------- 312.9/317.3 MB 3.7 MB/s eta 0:00:02
------------------------------------- 313.0/317.3 MB 3.7 MB/s eta 0:00:02
------------------------------------- 313.2/317.3 MB 3.7 MB/s eta 0:00:02
------------------------------------- 313.4/317.3 MB 3.7 MB/s eta 0:00:02
------------------------------------- 313.4/317.3 MB 3.6 MB/s eta 0:00:02
------------------------------------- 313.6/317.3 MB 3.7 MB/s eta 0:00:02
------------------------------------- 313.8/317.3 MB 3.7 MB/s eta 0:00:01
------------------------------------- 313.8/317.3 MB 3.7 MB/s eta 0:00:01
------------------------------------- 314.1/317.3 MB 3.7 MB/s eta 0:00:01
------------------------------------- 314.2/317.3 MB 3.7 MB/s eta 0:00:01
------------------------------------- 314.2/317.3 MB 3.7 MB/s eta 0:00:01
------------------------------------- 314.4/317.3 MB 3.6 MB/s eta 0:00:01
------------------------------------- 314.5/317.3 MB 3.6 MB/s eta 0:00:01
------------------------------------- 314.6/317.3 MB 3.6 MB/s eta 0:00:01
------------------------------------- 314.9/317.3 MB 3.6 MB/s eta 0:00:01
------------------------------------- 315.0/317.3 MB 3.6 MB/s eta 0:00:01
------------------------------------- 315.1/317.3 MB 3.6 MB/s eta 0:00:01
------------------------------------- 315.3/317.3 MB 3.6 MB/s eta 0:00:01
------------------------------------- 315.4/317.3 MB 3.6 MB/s eta 0:00:01
------------------------------------- 315.6/317.3 MB 3.5 MB/s eta 0:00:01
------------------------------------- 315.8/317.3 MB 3.6 MB/s eta 0:00:01
------------------------------------- 315.9/317.3 MB 3.6 MB/s eta 0:00:01
------------------------------------- 316.1/317.3 MB 3.6 MB/s eta 0:00:01
------------------------------------- 316.3/317.3 MB 3.6 MB/s eta 0:00:01
------------------------------------- 316.3/317.3 MB 3.5 MB/s eta 0:00:01
------------------------------------- 316.5/317.3 MB 3.6 MB/s eta 0:00:01
------------------------------------- 316.6/317.3 MB 3.5 MB/s eta 0:00:01
------------------------------------- 316.6/317.3 MB 3.4 MB/s eta 0:00:01
------------------------------------- 316.9/317.3 MB 3.5 MB/s eta 0:00:01
------------------------------------- 317.0/317.3 MB 3.6 MB/s eta 0:00:01
------------------------------------- 317.1/317.3 MB 3.5 MB/s eta 0:00:01
------------------------------------- 317.3/317.3 MB 3.5 MB/s eta 0:00:01
------------------------------------- 317.3/317.3 MB 3.5 MB/s eta 0:00:01
------------------------------------- 317.3/317.3 MB 3.5 MB/s eta 0:00:01
------------------------------------- 317.3/317.3 MB 3.5 MB/s eta 0:00:01
------------------------------------- 317.3/317.3 MB 3.5 MB/s eta 0:00:01
------------------------------------- 317.3/317.3 MB 3.5 MB/s eta 0:00:01
------------------------------------- 317.3/317.3 MB 3.5 MB/s eta 0:00:01
------------------------------------- 317.3/317.3 MB 3.5 MB/s eta 0:00:01
------------------------------------- 317.3/317.3 MB 3.5 MB/s eta 0:00:01
------------------------------------- 317.3/317.3 MB 3.5 MB/s eta 0:00:01
------------------------------------- 317.3/317.3 MB 3.5 MB/s eta 0:00:01
------------------------------------- 317.3/317.3 MB 3.5 MB/s eta 0:00:01
------------------------------------- 317.3/317.3 MB 3.5 MB/s eta 0:00:01
------------------------------------- 317.3/317.3 MB 3.5 MB/s eta 0:00:01
------------------------------------- 317.3/317.3 MB 3.5 MB/s eta 0:00:01
------------------------------------- 317.3/317.3 MB 3.5 MB/s eta 0:00:01
------------------------------------- 317.3/317.3 MB 3.5 MB/s eta 0:00:01
------------------------------------- 317.3/317.3 MB 3.5 MB/s eta 0:00:01
------------------------------------- 317.3/317.3 MB 3.5 MB/s eta 0:00:01
------------------------------------- 317.3/317.3 MB 3.5 MB/s eta 0:00:01
------------------------------------- 317.3/317.3 MB 3.5 MB/s eta 0:00:01
------------------------------------- 317.3/317.3 MB 3.5 MB/s eta 0:00:01
------------------------------------- 317.3/317.3 MB 3.5 MB/s eta 0:00:01
------------------------------------- 317.3/317.3 MB 3.5 MB/s eta 0:00:01
------------------------------------- 317.3/317.3 MB 3.5 MB/s eta 0:00:01
------------------------------------- 317.3/317.3 MB 3.5 MB/s eta 0:00:01
-------------------------------------- 317.3/317.3 MB 2.5 MB/s eta 0:00:00
Preparing metadata (setup.py): started
Preparing metadata (setup.py): finished with status 'done'
Requirement already satisfied: lightgbm in d:\anaconda app\lib\site-packages (4.5.0)
ERROR: Could not find a version that satisfies the requirement apache-spark (from versions: none) ERROR: No matching distribution found for apache-spark
Import Libraries¶
In [2]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
import geopandas as gpd
import folium
import shapely
import nltk
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestClassifier
import tensorflow as tf
from tensorflow import keras
import xgboost as xgb
import lightgbm as lgb
Generate and Load a Custom Dataset¶
In [43]:
# Create mock dataset with equal-length lists (11 entries each)
data = {
'Country': ['USA', 'Russia', 'India', 'China', 'Ukraine', 'Israel', 'Palestine', 'France', 'Germany', 'Pakistan', 'Taiwan'],
'Conflict_Region': ['Mid North America', 'Eastern Europe', 'South Asia', 'East Asia', 'Eastern Europe', 'Middle East', 'Middle East', 'Western Europe', 'Western Europe', 'South West Asia', 'East Asia'],
'Conflict_Type': ['Tension', 'War', 'Tension', 'Potential Conflict', 'War', 'War', 'War', 'Potential Conflict', 'Tension', 'Extreme Tension', 'Potential Conflict'],
'Latitude': [37.0902, 61.5240, 20.5937, 35.8617, 48.3794, 31.0461, 31.9522, 46.6034, 51.1657, 30.3753, 23.6978],
'Longitude': [-95.7129, 105.3188, 78.9629, 104.1954, 31.1656, 34.8516, 35.2332, 1.8883, 10.4515, 69.3451, 121.0200],
'Altitude': [760, 600, 160, 1840, 175, 508, 795, 375, 263, 900, 1150],
'Conflict_Intensity': [10, 20, 30, 25, 15, 18, 5, 12, 22, 16, 14],
'Deaths': np.random.randint(1000, 50000, size=11), # Adjusted to 11 entries
'Economic_Impact_Billion': np.random.uniform(1.5, 100, size=11), # Adjusted to 11 entries
'Environmental_Damage_Index': np.random.uniform(1, 10, size=11), # Adjusted to 11 entries
'UN_Interventions': np.random.choice([1, 2, 3, 4], size=11), # Adjusted to 11 entries
'Total_Population': [331002651, 145912025, 1380004385, 1439323776, 43733762, 8655535, 5000000, 65273511, 83783942, 225199937, 23816775],
'Male_Population': [162000000, 67000000, 705000000, 724000000, 22000000, 4300000, 2500000, 32000000, 41000000, 113000000, 12000000],
'Female_Population': [169000000, 78900000, 675000000, 715000000, 21700000, 4350000, 2500000, 33200000, 42700000, 112000000, 11800000]
}
# Convert to DataFrame
df = pd.DataFrame(data)
# Save to CSV
df.to_csv(r'E:\conflict_data.csv', index=False)
# Set display options to show all rows and columns
pd.set_option('display.max_rows', None) # Display all rows
pd.set_option('display.max_columns', None) # Display all columns
# Display the full DataFrame
print(df)
# Display first few rows
df.head()
Country Conflict_Region Conflict_Type Latitude Longitude \
0 USA Mid North America Tension 37.0902 -95.7129
1 Russia Eastern Europe War 61.5240 105.3188
2 India South Asia Tension 20.5937 78.9629
3 China East Asia Potential Conflict 35.8617 104.1954
4 Ukraine Eastern Europe War 48.3794 31.1656
5 Israel Middle East War 31.0461 34.8516
6 Palestine Middle East War 31.9522 35.2332
7 France Western Europe Potential Conflict 46.6034 1.8883
8 Germany Western Europe Tension 51.1657 10.4515
9 Pakistan South West Asia Extreme Tension 30.3753 69.3451
10 Taiwan East Asia Potential Conflict 23.6978 121.0200
Altitude Conflict_Intensity Deaths Economic_Impact_Billion \
0 760 10 34952 83.810799
1 600 20 19327 71.334938
2 160 30 42936 91.256441
3 1840 25 23452 70.652218
4 175 15 8489 12.682348
5 508 18 13351 62.479122
6 795 5 12032 99.574426
7 375 12 25615 58.227126
8 263 22 40647 75.929998
9 900 16 40744 36.684055
10 1150 14 49027 66.481821
Environmental_Damage_Index UN_Interventions Total_Population \
0 8.855605 1 331002651
1 8.692949 3 145912025
2 5.458306 1 1380004385
3 2.503946 1 1439323776
4 9.768354 1 43733762
5 5.421264 1 8655535
6 4.388213 3 5000000
7 5.902327 2 65273511
8 8.961130 4 83783942
9 9.695679 1 225199937
10 9.098252 3 23816775
Male_Population Female_Population
0 162000000 169000000
1 67000000 78900000
2 705000000 675000000
3 724000000 715000000
4 22000000 21700000
5 4300000 4350000
6 2500000 2500000
7 32000000 33200000
8 41000000 42700000
9 113000000 112000000
10 12000000 11800000
Out[43]:
| Country | Conflict_Region | Conflict_Type | Latitude | Longitude | Altitude | Conflict_Intensity | Deaths | Economic_Impact_Billion | Environmental_Damage_Index | UN_Interventions | Total_Population | Male_Population | Female_Population | |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 0 | USA | Mid North America | Tension | 37.0902 | -95.7129 | 760 | 10 | 34952 | 83.810799 | 8.855605 | 1 | 331002651 | 162000000 | 169000000 |
| 1 | Russia | Eastern Europe | War | 61.5240 | 105.3188 | 600 | 20 | 19327 | 71.334938 | 8.692949 | 3 | 145912025 | 67000000 | 78900000 |
| 2 | India | South Asia | Tension | 20.5937 | 78.9629 | 160 | 30 | 42936 | 91.256441 | 5.458306 | 1 | 1380004385 | 705000000 | 675000000 |
| 3 | China | East Asia | Potential Conflict | 35.8617 | 104.1954 | 1840 | 25 | 23452 | 70.652218 | 2.503946 | 1 | 1439323776 | 724000000 | 715000000 |
| 4 | Ukraine | Eastern Europe | War | 48.3794 | 31.1656 | 175 | 15 | 8489 | 12.682348 | 9.768354 | 1 | 43733762 | 22000000 | 21700000 |
Basic Data Exploration¶
In [44]:
df.info()
df.describe()
<class 'pandas.core.frame.DataFrame'> RangeIndex: 11 entries, 0 to 10 Data columns (total 14 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 Country 11 non-null object 1 Conflict_Region 11 non-null object 2 Conflict_Type 11 non-null object 3 Latitude 11 non-null float64 4 Longitude 11 non-null float64 5 Altitude 11 non-null int64 6 Conflict_Intensity 11 non-null int64 7 Deaths 11 non-null int32 8 Economic_Impact_Billion 11 non-null float64 9 Environmental_Damage_Index 11 non-null float64 10 UN_Interventions 11 non-null int32 11 Total_Population 11 non-null int64 12 Male_Population 11 non-null int64 13 Female_Population 11 non-null int64 dtypes: float64(4), int32(2), int64(5), object(3) memory usage: 1.2+ KB
Out[44]:
| Latitude | Longitude | Altitude | Conflict_Intensity | Deaths | Economic_Impact_Billion | Environmental_Damage_Index | UN_Interventions | Total_Population | Male_Population | Female_Population | |
|---|---|---|---|---|---|---|---|---|---|---|---|
| count | 11.000000 | 11.000000 | 11.000000 | 11.000000 | 11.000000 | 11.000000 | 11.000000 | 11.000000 | 1.100000e+01 | 1.100000e+01 | 1.100000e+01 |
| mean | 38.026318 | 45.156318 | 684.181818 | 17.000000 | 28233.818182 | 66.283027 | 7.158730 | 1.909091 | 3.410642e+08 | 1.713455e+08 | 1.696500e+08 |
| std | 12.507829 | 61.570688 | 496.251512 | 7.071068 | 14096.619090 | 24.505970 | 2.496426 | 1.136182 | 5.377658e+08 | 2.729668e+08 | 2.647921e+08 |
| min | 20.593700 | -95.712900 | 160.000000 | 5.000000 | 8489.000000 | 12.682348 | 2.503946 | 1.000000 | 5.000000e+06 | 2.500000e+06 | 2.500000e+06 |
| 25% | 30.710700 | 20.808550 | 319.000000 | 13.000000 | 16339.000000 | 60.353124 | 5.439785 | 1.000000 | 3.377527e+07 | 1.700000e+07 | 1.675000e+07 |
| 50% | 35.861700 | 35.233200 | 600.000000 | 16.000000 | 25615.000000 | 70.652218 | 8.692949 | 1.000000 | 8.378394e+07 | 4.100000e+07 | 4.270000e+07 |
| 75% | 47.491400 | 91.579150 | 847.500000 | 21.000000 | 40695.500000 | 79.870399 | 9.029691 | 3.000000 | 2.781013e+08 | 1.375000e+08 | 1.405000e+08 |
| max | 61.524000 | 121.020000 | 1840.000000 | 30.000000 | 49027.000000 | 99.574426 | 9.768354 | 4.000000 | 1.439324e+09 | 7.240000e+08 | 7.150000e+08 |
Data Cleaning¶
In [45]:
# Checking for missing values
print(df.isnull().sum())
# Handling missing values (if any)
df.fillna(0, inplace=True)
Country 0 Conflict_Region 0 Conflict_Type 0 Latitude 0 Longitude 0 Altitude 0 Conflict_Intensity 0 Deaths 0 Economic_Impact_Billion 0 Environmental_Damage_Index 0 UN_Interventions 0 Total_Population 0 Male_Population 0 Female_Population 0 dtype: int64
Data Wrangling¶
In [36]:
# Convert columns to appropriate types if necessary
df['Deaths'] = df['Deaths'].astype(int)
df['Economic_Impact_Billion'] = df['Economic_Impact_Billion'].astype(float)
Bar Chart: Conflict Types by Country¶
In [14]:
import matplotlib.pyplot as plt
import seaborn as sns
plt.figure(figsize=(10, 6))
sns.countplot(data=df, x='Country', hue='Conflict_Type')
plt.title('Conflict Types by Country')
plt.xticks(rotation=45)
plt.show()
Heatmap: Correlation of Economic and Environmental Impact¶
In [15]:
corr = df[['Deaths', 'Economic_Impact_Billion', 'Environmental_Damage_Index']].corr()
sns.heatmap(corr, annot=True, cmap='coolwarm')
plt.title('Correlation Heatmap of Deaths, Economic Impact, and Environmental Damage')
plt.show()
Pie Chart: Distribution of UN Interventions¶
In [16]:
df['UN_Interventions'].value_counts().plot.pie(autopct='%1.1f%%', figsize=(8, 8))
plt.title('UN Interventions Distribution')
plt.ylabel('')
plt.show()
3D Geopolitical Map with Plotly and Folium¶
In [1]:
import pandas as pd
import plotly.graph_objects as go
# Sample data
data = {
'Country': ['USA', 'Russia', 'India', 'China', 'Ukraine', 'Israel', 'Palestine', 'France', 'Germany', 'Pakistan', 'Taiwan'],
'Conflict_Type': ['Tension', 'War', 'Tension', 'Potential Conflict', 'War', 'War', 'War', 'Potential Conflict', 'Tension', 'Extreme Tension', 'Potential Conflict']
}
# Creating a DataFrame
df = pd.DataFrame(data)
# Coordinates for the countries (latitude and longitude)
coordinates = {
'USA': (37.0902, -95.7129),
'Russia': (61.5240, 105.3180),
'India': (20.5937, 78.9629),
'China': (35.8617, 104.1954),
'Ukraine': (48.3794, 31.1656),
'Israel': (31.0461, 34.8516),
'Palestine': (31.9522, 35.2332),
'France': (46.6034, 1.8883),
'Germany': (51.1657, 10.4515),
'Pakistan': (30.3753, 69.3451),
'Taiwan': (23.6978, 120.9605)
}
# Adding coordinates to the DataFrame
df['Latitude'] = df['Country'].map(lambda country: coordinates[country][0])
df['Longitude'] = df['Country'].map(lambda country: coordinates[country][1])
# Color mapping for Conflict_Type
color_map = {
'Tension': 'yellow',
'War': 'red',
'Potential Conflict': 'orange',
'Extreme Tension': 'purple'
}
# Create a scatter plot for the circles
fig = go.Figure()
for index, row in df.iterrows():
fig.add_trace(go.Scattergeo(
lon=[row['Longitude']],
lat=[row['Latitude']],
text=row['Country'],
mode='markers+text',
marker=dict(
size=12,
color=color_map[row['Conflict_Type']],
line=dict(width=2), # Thicker outline for the markers
opacity=0.7
),
textposition="top center",
textfont=dict(
family="Arial",
size=12,
color="black" # Set country names to extreme black
)
))
# Add custom legend with small colored boxes
annotations = [
dict(
x=0.95, y=1, # Coordinates of the legend box in the top-right corner
xref='paper', yref='paper',
showarrow=False,
text='<b>Legend</b>',
font=dict(size=14, color='black'),
align='left'
),
dict(
x=0.95, y=0.95, # Position for 'Tension'
xref='paper', yref='paper',
showarrow=False,
text='Tension',
font=dict(size=12, color='black'),
bgcolor='yellow', # Box color for 'Tension'
bordercolor='black',
align='left'
),
dict(
x=0.95, y=0.90, # Position for 'War'
xref='paper', yref='paper',
showarrow=False,
text='War',
font=dict(size=12, color='black'),
bgcolor='red', # Box color for 'War'
bordercolor='black',
align='left'
),
dict(
x=0.95, y=0.85, # Position for 'Potential Conflict'
xref='paper', yref='paper',
showarrow=False,
text='Potential Conflict',
font=dict(size=12, color='black'),
bgcolor='orange', # Box color for 'Potential Conflict'
bordercolor='black',
align='left'
),
dict(
x=0.95, y=0.80, # Position for 'Extreme Tension'
xref='paper', yref='paper',
showarrow=False,
text='Extreme Tension',
font=dict(size=12, color='black'),
bgcolor='purple', # Box color for 'Extreme Tension'
bordercolor='black',
align='left'
)
]
# Update layout for the map
fig.update_layout(
title='Global Conflict Mapping',
geo=dict(
scope='world',
showland=True,
landcolor='grey', # Color of the land
oceancolor='blue', # Color of the oceans
countrycolor='black', # Color of the country boundaries
coastlinecolor='black',
projection_type='natural earth',
center={"lat": 20, "lon": 0}, # Center of the map
projection_scale=3.5, # Increase/decrease size of the map
),
height=700,
width=1200,
annotations=annotations # Add the annotations for the legend
)
# Show the figure
fig.show()
In [19]:
import pandas as pd
import folium
from folium.plugins import MarkerCluster
import plotly.express as px
import numpy as np
from branca.element import Template, MacroElement
# Sample data
data = {
'Country': ['USA', 'Russia', 'India', 'China', 'Ukraine', 'Israel', 'Palestine', 'France', 'Germany', 'Pakistan', 'Taiwan'],
'Conflict_Type': ['Tension', 'War', 'Tension', 'Potential Conflict', 'War', 'War', 'War', 'Potential Conflict', 'Tension', 'Extreme Tension', 'Potential Conflict'],
}
# Create a DataFrame
df = pd.DataFrame(data)
# Define geographical coordinates for countries
geo_coords = {
'USA': [37.0902, -95.7129],
'Russia': [61.5240, 105.3188],
'India': [20.5937, 78.9629],
'China': [35.8617, 104.1954],
'Ukraine': [48.3794, 31.1656],
'Israel': [31.0461, 34.8516],
'Palestine': [31.9522, 35.2332],
'France': [46.6034, 1.8883],
'Germany': [51.1657, 10.4515],
'Pakistan': [30.3753, 69.3451],
'Taiwan': [23.6978, 120.9605]
}
# Define colors for each country based on conflict type
conflict_colors = {
'Tension': 'yellow',
'War': 'red',
'Potential Conflict': 'orange',
'Extreme Tension': 'purple',
}
# Initialize Folium map centered at a global perspective
m = folium.Map(location=[20, 0], zoom_start=2, control_scale=True)
# Define ocean and land color using Tile Layer
folium.TileLayer('CartoDB positron').add_to(m)
# Add circles, labels and tooltips with country names in extreme black color
for index, row in df.iterrows():
country = row['Country']
conflict_type = row['Conflict_Type']
# Get coordinates for the country
coords = geo_coords[country]
# Create a circle marker for each country
folium.CircleMarker(
location=coords,
radius=10, # You can adjust the radius to increase/decrease size
color=conflict_colors[conflict_type],
fill=True,
fill_opacity=0.6,
popup=f"<b>{country}</b>: {conflict_type}",
tooltip=folium.Tooltip(f"<span style='color:black;'><b>{country}</b></span>") # Tooltip with country name in black color
).add_to(m)
# Draw country boundaries with thicker outline
folium.GeoJson(
'https://raw.githubusercontent.com/python-visualization/folium/master/examples/data/world-countries.json',
style_function=lambda x: {
'fillColor': 'grey',
'color': 'black',
'weight': 2, # Thicker outline for country borders
'fillOpacity': 0.6,
}
).add_to(m)
# Add a color legend to the top right corner of the map
legend_html = '''
<div style="
position: fixed;
bottom: 50px; left: 50px; width: 150px; height: 120px;
background-color: white; z-index:9999; font-size:14px;
border:2px solid grey;
">
<b>Conflict Legend</b> <br>
<i class="fa fa-circle" style="color:yellow"></i> Tension<br>
<i class="fa fa-circle" style="color:red"></i> War<br>
<i class="fa fa-circle" style="color:orange"></i> Potential Conflict<br>
<i class="fa fa-circle" style="color:purple"></i> Extreme Tension<br>
</div>
'''
m.get_root().html.add_child(folium.Element(legend_html))
# Display the map
m.save('3D_Geopolitical_Map_with_Legend.html')
m
Out[19]:
Make this Notebook Trusted to load map: File -> Trust Notebook
In [ ]:
Choropleth Map: UN Interventions by Country¶
In [15]:
import pandas as pd
import folium
from folium.features import GeoJsonTooltip
# Sample data
data = {
'Country': ['USA', 'Russia', 'India', 'China', 'Ukraine', 'Israel', 'Palestine', 'France', 'Germany', 'Pakistan', 'Taiwan'],
'Conflict_Type': ['Tension', 'War', 'Tension', 'Potential Conflict', 'War', 'War', 'War', 'Potential Conflict', 'Tension', 'Extreme Tension', 'Potential Conflict']
}
# Create a DataFrame
df = pd.DataFrame(data)
# Define geographical coordinates for the countries
geo_coords = {
'USA': [37.0902, -95.7129],
'Russia': [61.5240, 105.3188],
'India': [20.5937, 78.9629],
'China': [35.8617, 104.1954],
'Ukraine': [48.3794, 31.1656],
'Israel': [31.0461, 34.8516],
'Palestine': [31.9522, 35.2332],
'France': [46.6034, 1.8883],
'Germany': [51.1657, 10.4515],
'Pakistan': [30.3753, 69.3451],
'Taiwan': [23.6978, 120.9605]
}
# Define random colors for each country for the circle markers
country_colors = {
'USA': 'green',
'Russia': 'blue',
'India': 'orange',
'China': 'purple',
'Ukraine': 'red',
'Israel': 'yellow',
'Palestine': 'pink',
'France': 'cyan',
'Germany': 'magenta',
'Pakistan': 'brown',
'Taiwan': 'lime'
}
# Initialize a Folium map with global coordinates
m = folium.Map(location=[20, 0], zoom_start=2, control_scale=True, tiles=None)
# Add a custom tile layer for ocean and land coloring
folium.TileLayer(
tiles='Stamen Toner', # You can also try 'CartoDB positron' or 'Stamen Watercolor'
name='Toner',
control=False
).add_to(m)
# Add country boundaries (GeoJson layer)
geo_json_url = 'https://raw.githubusercontent.com/python-visualization/folium/master/examples/data/world-countries.json'
folium.GeoJson(
geo_json_url,
style_function=lambda feature: {
'fillColor': 'grey', # Land color
'color': 'black', # Country borders color
'weight': 2, # Thicker borders
'fillOpacity': 0.6,
},
tooltip=GeoJsonTooltip(
fields=['name'],
aliases=['Country:'],
localize=True,
sticky=True,
labels=True,
style=(
"background-color: white; color: black; font-family: "
"arial; font-size: 12px; padding: 10px;"
)
)
).add_to(m)
# Add circle markers for each country
for country, coords in geo_coords.items():
folium.CircleMarker(
location=coords,
radius=10,
color=country_colors[country],
fill=True,
fill_opacity=0.8,
popup=f"<b>{country}</b>: {df[df['Country'] == country]['Conflict_Type'].values[0]}",
tooltip=folium.Tooltip(f"<span style='color:black;'><b>{country}</b></span>") # Country name in black when hovered
).add_to(m)
# Save the map to an HTML file and display
m.save('Global_Conflict_Map_with_UN_Involvement.html')
m
Out[15]:
Make this Notebook Trusted to load map: File -> Trust Notebook
In [ ]:
In [14]:
import pandas as pd
import folium
# Sample data
data = {
'Country': ['USA', 'Russia', 'India', 'China', 'Ukraine',
'Israel', 'Palestine', 'France', 'Germany',
'Pakistan', 'Taiwan'],
'Conflict_Type': ['Tension', 'War', 'Tension', 'Potential Conflict',
'War', 'War', 'War', 'Potential Conflict',
'Tension', 'Extreme Tension', 'Potential Conflict'],
}
# Create a DataFrame
df = pd.DataFrame(data)
# Define geographical coordinates for countries
geo_coords = {
'USA': [37.0902, -95.7129],
'Russia': [61.5240, 105.3188],
'India': [20.5937, 78.9629],
'China': [35.8617, 104.1954],
'Ukraine': [48.3794, 31.1656],
'Israel': [31.0461, 34.8516],
'Palestine': [31.9522, 35.2332],
'France': [46.6034, 1.8883],
'Germany': [51.1657, 10.4515],
'Pakistan': [30.3753, 69.3451],
'Taiwan': [23.6978, 120.9605]
}
# Define colors for circles based on countries
circle_colors = {
'USA': 'blue',
'Russia': 'red',
'India': 'green',
'China': 'yellow',
'Ukraine': 'purple',
'Israel': 'orange',
'Palestine': 'pink',
'France': 'cyan',
'Germany': 'magenta',
'Pakistan': 'lime',
'Taiwan': 'teal'
}
# Initialize Folium map with a larger size and specific location
m = folium.Map(location=[20, 0], zoom_start=2, control_scale=True,
tiles='CartoDB positron')
# Add GeoJson layer for countries without borders (same color as land)
folium.GeoJson(
'https://raw.githubusercontent.com/python-visualization/folium/master/examples/data/world-countries.json',
style_function=lambda feature: {
'fillColor': 'grey', # Color of land
'color': 'grey', # Make outline color the same as land color
'weight': 0, # No weight for the outline to effectively remove it
'fillOpacity': 0.6,
}
).add_to(m)
# Add circles and country names on the map
for index, row in df.iterrows():
country = row['Country']
coords = geo_coords[country]
# Create a circle marker for each country
folium.CircleMarker(
location=coords,
radius=10,
color=circle_colors[country],
fill=True,
fill_opacity=0.6,
popup=f"<b>{country}</b>: {row['Conflict_Type']}",
).add_to(m)
# Display the map
m.save('UN_Interventions_Choropleth_Map.html')
m
Out[14]:
Make this Notebook Trusted to load map: File -> Trust Notebook
In [ ]:
Data Preprocessing for Machine Learning¶
In [32]:
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
# Assuming df is already defined and loaded with the data
# One-hot encode categorical columns
X = pd.get_dummies(df[['Country', 'Conflict_Region', 'UN_Interventions']])
# Target column
y = df['Conflict_Type']
# Split the data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
# Scale the numerical features
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
# Assuming df is already defined and loaded with the data
# One-hot encode categorical columns
X = pd.get_dummies(df[['Country', 'Conflict_Region', 'UN_Interventions']])
# Target column
y = df['Conflict_Type']
# Split the data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
# Scale the numerical features
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)
print("X_train_scaled : ", X_train_scaled)
print("\n\n\nX_test_scaled : ", X_test_scaled)
X_train_scaled : [[ 0.2773501 -0.40824829 -0.40824829 -0.40824829 2.44948974 0. 0. -0.40824829 -0.40824829 0. 0. -0.40824829 -0.40824829 -0.63245553 0. -0.40824829 2.44948974 0. -0.63245553] [-0.69337525 -0.40824829 -0.40824829 -0.40824829 -0.40824829 0. 0. -0.40824829 2.44948974 0. 0. -0.40824829 -0.40824829 1.58113883 0. -0.40824829 -0.40824829 0. -0.63245553] [ 1.24807544 -0.40824829 -0.40824829 2.44948974 -0.40824829 0. 0. -0.40824829 -0.40824829 0. 0. -0.40824829 -0.40824829 -0.63245553 0. -0.40824829 -0.40824829 0. 1.58113883] [ 1.24807544 -0.40824829 -0.40824829 -0.40824829 -0.40824829 0. 0. -0.40824829 -0.40824829 0. 0. 2.44948974 -0.40824829 1.58113883 0. -0.40824829 -0.40824829 0. -0.63245553] [-1.66410059 -0.40824829 2.44948974 -0.40824829 -0.40824829 0. 0. -0.40824829 -0.40824829 0. 0. -0.40824829 -0.40824829 -0.63245553 0. -0.40824829 -0.40824829 0. 1.58113883] [-0.69337525 2.44948974 -0.40824829 -0.40824829 -0.40824829 0. 0. -0.40824829 -0.40824829 0. 0. -0.40824829 2.44948974 -0.63245553 0. -0.40824829 -0.40824829 0. -0.63245553] [ 0.2773501 -0.40824829 -0.40824829 -0.40824829 -0.40824829 0. 0. 2.44948974 -0.40824829 0. 0. -0.40824829 -0.40824829 -0.63245553 0. 2.44948974 -0.40824829 0. -0.63245553]] X_test_scaled : [[-0.69337525 -0.40824829 -0.40824829 -0.40824829 -0.40824829 1. 0. -0.40824829 -0.40824829 0. 0. -0.40824829 -0.40824829 -0.63245553 0. 2.44948974 -0.40824829 0. -0.63245553] [-1.66410059 -0.40824829 -0.40824829 -0.40824829 -0.40824829 0. 0. -0.40824829 -0.40824829 0. 1. -0.40824829 -0.40824829 -0.63245553 1. -0.40824829 -0.40824829 0. -0.63245553] [ 1.24807544 -0.40824829 -0.40824829 -0.40824829 -0.40824829 0. 1. -0.40824829 -0.40824829 0. 0. -0.40824829 -0.40824829 -0.63245553 0. -0.40824829 -0.40824829 1. -0.63245553] [-0.69337525 -0.40824829 -0.40824829 -0.40824829 -0.40824829 0. 0. -0.40824829 -0.40824829 1. 0. -0.40824829 2.44948974 -0.63245553 0. -0.40824829 -0.40824829 0. -0.63245553]]
Machine Learning Models for Conflict Prediction : Train-Test Split and Random Forest Model¶
In [33]:
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report
# Prepare data
X = df[['Deaths', 'Economic_Impact_Billion', 'Environmental_Damage_Index']]
y = df['Conflict_Type']
# Train-Test Split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
# Random Forest Classifier
model = RandomForestClassifier(n_estimators=100)
model.fit(X_train, y_train)
# Predictions
y_pred = model.predict(X_test)
# Evaluation
print(classification_report(y_test, y_pred))
precision recall f1-score support
Extreme Tension 0.00 0.00 0.00 1
Potential Conflict 0.50 1.00 0.67 1
Tension 0.00 0.00 0.00 1
War 0.00 0.00 0.00 1
accuracy 0.25 4
macro avg 0.12 0.25 0.17 4
weighted avg 0.12 0.25 0.17 4
D:\Anaconda app\Lib\site-packages\sklearn\metrics\_classification.py:1509: UndefinedMetricWarning: Precision is ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior. D:\Anaconda app\Lib\site-packages\sklearn\metrics\_classification.py:1509: UndefinedMetricWarning: Precision is ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior. D:\Anaconda app\Lib\site-packages\sklearn\metrics\_classification.py:1509: UndefinedMetricWarning: Precision is ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.
Feature Engineering for Machine Learning Models¶
In [41]:
df['Conflict_Binary'] = df['Conflict_Region'].apply(lambda x: 1 if x == 'Yes' else 0)
df['UN_Interventions'] = df['UN_Interventions'].apply(lambda x: 1 if x == 'Yes' else 0)
In [32]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import MinMaxScaler
# Sample dataset creation (this would be your actual dataset)
data = {
'Country': ['USA', 'Russia', 'India', 'China', 'Ukraine',
'Israel', 'Palestine', 'France', 'Germany',
'Pakistan', 'Taiwan'],
'Conflict_Type': ['Tension', 'War', 'Tension', 'Potential Conflict',
'War', 'War', 'War', 'Potential Conflict',
'Tension', 'Extreme Tension', 'Potential Conflict'],
'Environmental_Damage_Index': np.random.uniform(1, 10, size=11), # Adjusted to 11 entries
'UN_Interventions': np.random.choice([1, 2, 3, 4], size=11), # Adjusted to 11 entries
'Conflict_Intensity': [10, 20, 30, 25, 15, 18, 5, 12, 22, 16, 14],
}
# Create a DataFrame
df = pd.DataFrame(data)
# 1. Handling Missing Values
# Filling missing values with median (assuming numerical columns)
df['Environmental_Damage_Index'] = df['Environmental_Damage_Index'].fillna(df['Environmental_Damage_Index'].median())
# 2. Encoding Categorical Variables
df = pd.get_dummies(df, columns=['Conflict_Type'], drop_first=True)
# 3. Feature Scaling (Normalization)
scaler = MinMaxScaler()
df['Environmental_Damage_Index'] = scaler.fit_transform(df[['Environmental_Damage_Index']])
# 4. Creating a New Feature: Conflict Intensity (example based on existing data)
df['Conflict_Intensity'] = df['Environmental_Damage_Index'] * df['UN_Interventions']
# 5. Display the processed DataFrame
print("Processed DataFrame:")
print(df)
# 6. Data Visualization: Distribution of Conflict Types
plt.figure(figsize=(10, 6))
sns.countplot(data=df, x='Conflict_Type_War', palette='viridis', hue='Conflict_Type_War', legend=False)
plt.title('Distribution of Conflict Types')
plt.xlabel('Conflict Type (War = 1, Others = 0)')
plt.ylabel('Count')
plt.xticks(ticks=[0, 1], labels=['No War', 'War'])
plt.grid(axis='y')
plt.show()
Processed DataFrame:
Country Environmental_Damage_Index UN_Interventions \
0 USA 0.705343 1
1 Russia 0.517885 3
2 India 0.654023 2
3 China 1.000000 2
4 Ukraine 0.427901 1
5 Israel 0.408247 2
6 Palestine 0.439919 2
7 France 0.988644 2
8 Germany 0.518003 2
9 Pakistan 0.000000 4
10 Taiwan 0.532715 2
Conflict_Intensity Conflict_Type_Potential Conflict \
0 0.705343 False
1 1.553656 False
2 1.308046 False
3 2.000000 True
4 0.427901 False
5 0.816495 False
6 0.879839 False
7 1.977288 True
8 1.036006 False
9 0.000000 False
10 1.065429 True
Conflict_Type_Tension Conflict_Type_War
0 True False
1 False True
2 True False
3 False False
4 False True
5 False True
6 False True
7 False False
8 True False
9 False False
10 False False
Model Selection - Logistic Regression¶
In [42]:
from sklearn.linear_model import LogisticRegression
log_reg = LogisticRegression()
log_reg.fit(X_train_scaled, y_train)
Out[42]:
LogisticRegression()In a Jupyter environment, please rerun this cell to show the HTML representation or trust the notebook.
On GitHub, the HTML representation is unable to render, please try loading this page with nbviewer.org.
LogisticRegression()
In [35]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix, classification_report, accuracy_score
# Sample dataset creation using provided data
data = {
'Country': ['USA', 'Russia', 'India', 'China', 'Ukraine', 'Israel', 'Palestine',
'France', 'Germany', 'Pakistan', 'Taiwan'],
'Conflict_Region': ['Mid North America', 'Eastern Europe', 'South Asia', 'East Asia',
'Eastern Europe', 'Middle East', 'Middle East', 'Western Europe',
'Western Europe', 'South West Asia', 'East Asia'],
'Conflict_Type': ['Tension', 'War', 'Tension', 'Potential Conflict', 'War',
'War', 'War', 'Potential Conflict', 'Tension', 'Extreme Tension',
'Potential Conflict'],
'Latitude': [37.0902, 61.5240, 20.5937, 35.8617, 48.3794,
31.0461, 31.9522, 46.6034, 51.1657, 30.3753, 23.6978],
'Longitude': [-95.7129, 105.3188, 78.9629, 104.1954, 31.1656,
34.8516, 35.2332, 1.8883, 10.4515, 69.3451, 121.0200],
'Altitude': [760, 600, 160, 1840, 175, 508, 795, 375, 263, 900, 1150],
'Conflict_Intensity': [10, 20, 30, 25, 15, 18, 5, 12, 22, 16, 14],
'Deaths': np.random.randint(1000, 50000, size=11),
'Economic_Impact_Billion': np.random.uniform(1.5, 100, size=11),
'Environmental_Damage_Index': np.random.uniform(1, 10, size=11),
'UN_Interventions': np.random.choice([1, 2, 3, 4], size=11),
'Total_Population': [331002651, 145912025, 1380004385, 1439323776, 43733762,
8655535, 5000000, 65273511, 83783942, 225199937, 23816775],
'Male_Population': [162000000, 67000000, 705000000, 724000000, 22000000,
4300000, 2500000, 32000000, 41000000, 113000000, 12000000],
'Female_Population': [169000000, 78900000, 675000000, 715000000, 21700000,
4350000, 2500000, 33200000, 42700000, 112000000, 11800000]
}
# Create a DataFrame
df = pd.DataFrame(data)
# Encoding target variable as binary (1 for 'War', 0 for other types)
df['Conflict_Type'] = df['Conflict_Type'].apply(lambda x: 1 if x == 'War' else 0)
# Defining features and target variable
X = df[['Latitude', 'Longitude', 'Altitude', 'Conflict_Intensity',
'Deaths', 'Economic_Impact_Billion', 'Environmental_Damage_Index',
'UN_Interventions', 'Total_Population']]
y = df['Conflict_Type']
# Splitting the dataset into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
# Creating and training the Logistic Regression model
model = LogisticRegression()
model.fit(X_train, y_train)
# Making predictions
y_pred = model.predict(X_test)
# Evaluating the model
accuracy = accuracy_score(y_test, y_pred)
conf_matrix = confusion_matrix(y_test, y_pred)
class_report = classification_report(y_test, y_pred)
# Displaying evaluation results
print(f"Accuracy of the Logistic Regression model: {accuracy:.2f}")
print("Confusion Matrix:")
print(conf_matrix)
print("Classification Report:")
print(class_report)
# Visualization of the Confusion Matrix
plt.figure(figsize=(8, 6))
sns.heatmap(conf_matrix, annot=True, fmt='d', cmap='Blues',
xticklabels=['No War', 'War'], yticklabels=['No War', 'War'])
plt.title('Confusion Matrix')
plt.xlabel('Predicted')
plt.ylabel('Actual')
plt.show()
Accuracy of the Logistic Regression model: 1.00
Confusion Matrix:
[[3 0]
[0 1]]
Classification Report:
precision recall f1-score support
0 1.00 1.00 1.00 3
1 1.00 1.00 1.00 1
accuracy 1.00 4
macro avg 1.00 1.00 1.00 4
weighted avg 1.00 1.00 1.00 4
Evaluate Logistic Regression Model¶
In [43]:
from sklearn.metrics import classification_report
y_pred_log = log_reg.predict(X_test_scaled)
print(classification_report(y_test, y_pred_log))
precision recall f1-score support
Extreme Tension 0.00 0.00 0.00 1
Potential Conflict 0.50 1.00 0.67 1
Tension 0.00 0.00 0.00 1
War 0.50 1.00 0.67 1
accuracy 0.50 4
macro avg 0.25 0.50 0.33 4
weighted avg 0.25 0.50 0.33 4
D:\Anaconda app\Lib\site-packages\sklearn\metrics\_classification.py:1509: UndefinedMetricWarning: Precision is ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior. D:\Anaconda app\Lib\site-packages\sklearn\metrics\_classification.py:1509: UndefinedMetricWarning: Precision is ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior. D:\Anaconda app\Lib\site-packages\sklearn\metrics\_classification.py:1509: UndefinedMetricWarning: Precision is ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.
In [34]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix, classification_report, accuracy_score, roc_curve, auc
# Sample dataset creation
data = {
'Country': ['USA', 'Russia', 'India', 'China', 'Ukraine', 'Israel', 'Palestine',
'France', 'Germany', 'Pakistan', 'Taiwan'],
'Conflict_Region': ['Mid North America', 'Eastern Europe', 'South Asia', 'East Asia',
'Eastern Europe', 'Middle East', 'Middle East', 'Western Europe',
'Western Europe', 'South West Asia', 'East Asia'],
'Conflict_Type': ['Tension', 'War', 'Tension', 'Potential Conflict', 'War',
'War', 'War', 'Potential Conflict', 'Tension', 'Extreme Tension',
'Potential Conflict'],
'Latitude': [37.0902, 61.5240, 20.5937, 35.8617, 48.3794, 31.0461, 31.9522,
46.6034, 51.1657, 30.3753, 23.6978],
'Longitude': [-95.7129, 105.3188, 78.9629, 104.1954, 31.1656, 34.8516, 35.2332,
1.8883, 10.4515, 69.3451, 121.0200],
'Altitude': [760, 600, 160, 1840, 175, 508, 795, 375, 263, 900, 1150],
'Conflict_Intensity': [10, 20, 30, 25, 15, 18, 5, 12, 22, 16, 14],
'Deaths': np.random.randint(1000, 50000, size=11),
'Economic_Impact_Billion': np.random.uniform(1.5, 100, size=11),
'Environmental_Damage_Index': np.random.uniform(1, 10, size=11),
'UN_Interventions': np.random.choice([1, 2, 3, 4], size=11),
'Total_Population': [331002651, 145912025, 1380004385, 1439323776, 43733762,
8655535, 5000000, 65273511, 83783942, 225199937, 23816775],
'Male_Population': [162000000, 67000000, 705000000, 724000000, 22000000,
4300000, 2500000, 32000000, 41000000, 113000000, 12000000],
'Female_Population': [169000000, 78900000, 675000000, 715000000, 21700000,
4350000, 2500000, 33200000, 42700000, 112000000, 11800000]
}
# Create a DataFrame
df = pd.DataFrame(data)
# Encoding target variable as binary (1 for 'War', 0 for other types)
df['Conflict_Type'] = df['Conflict_Type'].apply(lambda x: 1 if x == 'War' else 0)
# Defining features and target variable
X = df[['Latitude', 'Longitude', 'Altitude', 'Conflict_Intensity', 'Deaths',
'Economic_Impact_Billion', 'Environmental_Damage_Index', 'UN_Interventions']]
y = df['Conflict_Type']
# Splitting the dataset into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
# Creating and training the Logistic Regression model
model = LogisticRegression()
model.fit(X_train, y_train)
# Making predictions
y_pred = model.predict(X_test)
# Evaluating the model
accuracy = accuracy_score(y_test, y_pred)
conf_matrix = confusion_matrix(y_test, y_pred)
class_report = classification_report(y_test, y_pred)
# Displaying evaluation results
print(f"Accuracy of the Logistic Regression model: {accuracy:.2f}")
print("Confusion Matrix:")
print(conf_matrix)
print("Classification Report:")
print(class_report)
# ROC Curve
fpr, tpr, thresholds = roc_curve(y_test, model.predict_proba(X_test)[:, 1])
roc_auc = auc(fpr, tpr)
# Plotting the ROC Curve
plt.figure(figsize=(8, 6))
plt.plot(fpr, tpr, color='blue', lw=2, label=f'ROC Curve (area = {roc_auc:.2f})')
plt.plot([0, 1], [0, 1], color='red', linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.0])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver Operating Characteristic (ROC) Curve')
plt.legend(loc='lower right')
plt.grid()
plt.show()
D:\Anaconda app\Lib\site-packages\sklearn\linear_model\_logistic.py:469: ConvergenceWarning:
lbfgs failed to converge (status=1):
STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.
Increase the number of iterations (max_iter) or scale the data as shown in:
https://scikit-learn.org/stable/modules/preprocessing.html
Please also refer to the documentation for alternative solver options:
https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression
Accuracy of the Logistic Regression model: 0.50
Confusion Matrix:
[[2 1]
[1 0]]
Classification Report:
precision recall f1-score support
0 0.67 0.67 0.67 3
1 0.00 0.00 0.00 1
accuracy 0.50 4
macro avg 0.33 0.33 0.33 4
weighted avg 0.50 0.50 0.50 4
Hyperparameter Tuning for Logistic Regression¶
In [2]:
from sklearn.model_selection import GridSearchCV, StratifiedKFold
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
import pandas as pd
# Example dataset (replace this with your actual dataset)
data = {'Feature1': [1, 2, 3, 4, 5, 6, 7, 8],
'Feature2': [5, 6, 7, 8, 9, 10, 11, 12],
'Target': [0, 1, 0, 1, 0, 1, 0, 1]} # Make sure you have enough samples per class
df = pd.DataFrame(data)
# Features and target
X = df[['Feature1', 'Feature2']]
y = df['Target']
# Split data into train and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
# Standardize the features
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)
# Initialize logistic regression model
log_reg = LogisticRegression()
# Define parameter grid
param_grid = {'C': [0.1, 1, 10, 100]}
# Use StratifiedKFold for better handling of imbalanced classes (if applicable)
# Make sure the number of splits is smaller than or equal to the number of samples in the smallest class
cv = StratifiedKFold(n_splits=2) # Adjust n_splits based on your dataset
# Grid search with cross-validation
grid_search = GridSearchCV(log_reg, param_grid, cv=cv)
grid_search.fit(X_train_scaled, y_train)
# Print the best parameters
print(grid_search.best_params_)
{'C': 0.1}
Model Selection - Random Forest Classifier¶
In [3]:
from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier(random_state=42)
rf.fit(X_train, y_train)
Out[3]:
RandomForestClassifier(random_state=42)In a Jupyter environment, please rerun this cell to show the HTML representation or trust the notebook.
On GitHub, the HTML representation is unable to render, please try loading this page with nbviewer.org.
RandomForestClassifier(random_state=42)
Evaluate Random Forest Classifier¶
In [5]:
# Import the necessary libraries
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
import pandas as pd
# Example dataset (replace with your actual dataset)
data = {'Feature1': [1, 2, 3, 4, 5, 6, 7, 8],
'Feature2': [5, 6, 7, 8, 9, 10, 11, 12],
'Target': [0, 1, 0, 1, 0, 1, 0, 1]} # Ensure there are sufficient samples for each class
df = pd.DataFrame(data)
# Features and target
X = df[['Feature1', 'Feature2']]
y = df['Target']
# Split the dataset into training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
# Standardize the features
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)
# Train a Random Forest Classifier
rf = RandomForestClassifier(random_state=42)
rf.fit(X_train_scaled, y_train)
# Make predictions
y_pred_rf = rf.predict(X_test_scaled)
# Print the classification report
print(classification_report(y_test, y_pred_rf))
precision recall f1-score support
0 0.33 1.00 0.50 1
1 0.00 0.00 0.00 2
accuracy 0.33 3
macro avg 0.17 0.50 0.25 3
weighted avg 0.11 0.33 0.17 3
D:\Anaconda app\Lib\site-packages\sklearn\metrics\_classification.py:1509: UndefinedMetricWarning: Precision is ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
D:\Anaconda app\Lib\site-packages\sklearn\metrics\_classification.py:1509: UndefinedMetricWarning: Precision is ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
D:\Anaconda app\Lib\site-packages\sklearn\metrics\_classification.py:1509: UndefinedMetricWarning: Precision is ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.
_warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
Feature Importance for Random Forest¶
In [6]:
import matplotlib.pyplot as plt
import seaborn as sns
feature_importances = rf.feature_importances_
sns.barplot(x=feature_importances, y=X.columns)
plt.title('Feature Importance in Random Forest')
plt.show()
Deep Learning Model - Neural Network¶
In [7]:
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
model = Sequential([
Dense(64, activation='relu', input_shape=(X_train_scaled.shape[1],)),
Dense(32, activation='relu'),
Dense(1, activation='sigmoid')
])
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
model.fit(X_train_scaled, y_train, epochs=10, batch_size=32)
D:\Anaconda app\Lib\site-packages\keras\src\layers\core\dense.py:87: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/10 1/1 ━━━━━━━━━━━━━━━━━━━━ 4s 4s/step - accuracy: 0.4000 - loss: 0.6907 Epoch 2/10 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 47ms/step - accuracy: 0.6000 - loss: 0.6861 Epoch 3/10 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 47ms/step - accuracy: 0.6000 - loss: 0.6819 Epoch 4/10 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 47ms/step - accuracy: 0.6000 - loss: 0.6782 Epoch 5/10 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 63ms/step - accuracy: 0.6000 - loss: 0.6747 Epoch 6/10 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 63ms/step - accuracy: 0.6000 - loss: 0.6716 Epoch 7/10 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 63ms/step - accuracy: 0.6000 - loss: 0.6688 Epoch 8/10 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 63ms/step - accuracy: 0.6000 - loss: 0.6663 Epoch 9/10 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 78ms/step - accuracy: 0.6000 - loss: 0.6640 Epoch 10/10 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 78ms/step - accuracy: 0.6000 - loss: 0.6621
Out[7]:
<keras.src.callbacks.history.History at 0x1a9da2792b0>
Evaluate Neural Network Model¶
In [1]:
# Import necessary libraries
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
# Sample dataset for model training (replace with real data)
# For simplicity, we're creating random data for X and y
np.random.seed(42)
X = np.random.rand(100, 10) # 100 samples, 10 features
y = np.random.randint(2, size=100) # Binary target variable
# Split the dataset into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
# Scale the data
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)
# Define the model
model = Sequential([
Dense(32, activation='relu', input_shape=(X_train_scaled.shape[1],)),
Dense(16, activation='relu'),
Dense(1, activation='sigmoid')
])
# Compile the model
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
# Train the model
model.fit(X_train_scaled, y_train, epochs=10, batch_size=10, verbose=0)
# Evaluate the model
test_loss, test_acc = model.evaluate(X_test_scaled, y_test, verbose=0)
print(f'Test Accuracy: {test_acc:.4f}')
# Create a Violin Plot for conflict severity across regions
# Sample dataset for the violin plot
data = {
'region': ['Africa', 'Asia', 'Europe', 'America', 'Africa', 'Asia', 'Europe', 'America'],
'conflict_severity': [8, 6, 5, 9, 7, 6, 4, 8]
}
# Convert to DataFrame
df = pd.DataFrame(data)
# Violin plot for conflict severity across regions
plt.figure(figsize=(10, 6))
sns.violinplot(x='region', y='conflict_severity', data=df, palette='Set2')
# Add labels and title
plt.title('Distribution of Conflict Severity Across Regions', fontsize=16)
plt.xlabel('Region', fontsize=12)
plt.ylabel('Conflict Severity', fontsize=12)
# Show the plot
plt.show()
D:\Anaconda app\Lib\site-packages\keras\src\layers\core\dense.py:87: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Test Accuracy: 0.5000
C:\Users\pradu\AppData\Local\Temp\ipykernel_9672\264341051.py:54: FutureWarning: Passing `palette` without assigning `hue` is deprecated and will be removed in v0.14.0. Assign the `x` variable to `hue` and set `legend=False` for the same effect. sns.violinplot(x='region', y='conflict_severity', data=df, palette='Set2')
Predictive Modeling using Time Series Analysis¶
In [10]:
# Import necessary libraries
import pandas as pd
from statsmodels.tsa.arima.model import ARIMA
import matplotlib.pyplot as plt
# Create a simple time series data (example)
# In practice, you can load data from a CSV or another source.
dates = pd.date_range(start='2020-01-01', periods=100, freq='D')
values = [i + (i * 0.5) for i in range(100)] # Example linear trend
time_series_data = pd.Series(values, index=dates)
# Visualize the time series
plt.figure(figsize=(10, 6))
time_series_data.plot(title="Sample Time Series Data")
plt.show()
# Fit the ARIMA model
model = ARIMA(time_series_data, order=(1, 1, 1)) # Example ARIMA(1,1,1) model
model_fit = model.fit()
# Print the summary of the ARIMA model
print(model_fit.summary())
D:\Anaconda app\Lib\site-packages\statsmodels\tsa\statespace\sarimax.py:966: UserWarning: Non-stationary starting autoregressive parameters found. Using zeros as starting parameters.
warn('Non-stationary starting autoregressive parameters'
SARIMAX Results
==============================================================================
Dep. Variable: y No. Observations: 100
Model: ARIMA(1, 1, 1) Log Likelihood 547.941
Date: Mon, 30 Sep 2024 AIC -1089.881
Time: 01:23:06 BIC -1082.096
Sample: 01-01-2020 HQIC -1086.731
- 04-09-2020
Covariance Type: opg
==============================================================================
coef std err z P>|z| [0.025 0.975]
------------------------------------------------------------------------------
ar.L1 0.9999 1.39e-05 7.2e+04 0.000 1.000 1.000
ma.L1 0.9997 6.84e-12 1.46e+11 0.000 1.000 1.000
sigma2 7.898e-07 1.59e-07 4.966 0.000 4.78e-07 1.1e-06
===================================================================================
Ljung-Box (L1) (Q): 0.00 Jarque-Bera (JB): 38039.41
Prob(Q): 0.99 Prob(JB): 0.00
Heteroskedasticity (H): 0.00 Skew: 9.80
Prob(H) (two-sided): 0.00 Kurtosis: 97.01
===================================================================================
Warnings:
[1] Covariance matrix calculated using the outer product of gradients (complex-step).
[2] Covariance matrix is singular or near-singular, with condition number 3.21e+25. Standard errors may be unstable.
D:\Anaconda app\Lib\site-packages\statsmodels\base\model.py:607: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals
warnings.warn("Maximum Likelihood optimization failed to "
NLP - Sentiment Analysis on UN Documents¶
In [2]:
# Import necessary libraries
from nltk.sentiment.vader import SentimentIntensityAnalyzer
import matplotlib.pyplot as plt
import seaborn as sns
# Initialize the Sentiment Intensity Analyzer
sid = SentimentIntensityAnalyzer()
# Sample UN resolution text (replace with actual UN document text)
text = "The United Nations seeks to promote global peace and security by addressing conflicts through diplomacy and international law."
# Perform Sentiment Analysis
sentiment_scores = sid.polarity_scores(text)
print("Sentiment Scores:", sentiment_scores)
# Data Visualization - Bar Chart of Sentiment Scores
# Convert sentiment scores into two lists: keys and values
labels = list(sentiment_scores.keys())
scores = list(sentiment_scores.values())
# Create a bar chart for sentiment analysis
plt.figure(figsize=(8, 6))
sns.barplot(x=labels, y=scores, palette='Blues_d')
# Add title and labels
plt.title('Sentiment Analysis of UN Resolution Text', fontsize=16)
plt.xlabel('Sentiment Categories', fontsize=12)
plt.ylabel('Scores', fontsize=12)
# Show the plot
plt.show()
Sentiment Scores: {'neg': 0.097, 'neu': 0.483, 'pos': 0.42, 'compound': 0.8271}
C:\Users\pradu\AppData\Local\Temp\ipykernel_9672\3918096282.py:23: FutureWarning: Passing `palette` without assigning `hue` is deprecated and will be removed in v0.14.0. Assign the `x` variable to `hue` and set `legend=False` for the same effect. sns.barplot(x=labels, y=scores, palette='Blues_d')
3D Geographical Visualization using Plotly and Basemap and Geopandas¶
In [13]:
import folium
import pandas as pd
# Data for the countries with altitude
data = {
'Country': [
'USA', 'Russia', 'India', 'China', 'Ukraine',
'Israel', 'Palestine', 'France', 'Germany', 'Pakistan', 'Taiwan'
],
'Latitude': [
37.0902, 61.5240, 20.5937, 35.8617, 48.3794,
31.0461, 31.9522, 46.6034, 51.1657, 30.3753, 23.6978
],
'Longitude': [
-95.7129, 105.3188, 78.9629, 104.1954, 31.1656,
34.8516, 35.2332, 1.8883, 10.4515, 69.3451, 121.0200
],
'Altitude': [
760, 600, 160, 1840, 175,
508, 795, 375, 263, 900, 1150 # Actual average altitudes in meters
],
'Conflict_Intensity': [
10, 20, 30, 25, 15, 18, 5, 12, 22, 16, 14 # Example conflict intensity values
]
}
# Create a DataFrame
df = pd.DataFrame(data)
# Create a map object centered around the geographic mid-point
m = folium.Map(location=[20, 0], zoom_start=2)
# Add markers for each country
for i, row in df.iterrows():
folium.Marker(
location=[row['Latitude'], row['Longitude']],
popup=(
f"<b>Country:</b> {row['Country']}<br>"
f"<b>Altitude:</b> {row['Altitude']} meters<br>"
f"<b>Conflict Intensity:</b> {row['Conflict_Intensity']}"
),
tooltip=row['Country'],
icon=folium.Icon(color='blue' if row['Conflict_Intensity'] < 15 else 'red')
).add_to(m)
# Save map to an HTML file and display it
m.save('3d_map_countries.html')
m
Out[13]:
Make this Notebook Trusted to load map: File -> Trust Notebook
In [41]:
pip install basemap
Collecting basemapNote: you may need to restart the kernel to use updated packages.
Downloading basemap-1.4.1-cp312-cp312-win_amd64.whl.metadata (9.3 kB)
Collecting basemap-data<1.4,>=1.3.2 (from basemap)
Downloading basemap_data-1.3.2-py2.py3-none-any.whl.metadata (2.7 kB)
Requirement already satisfied: pyshp<2.4,>=1.2 in d:\anaconda app\lib\site-packages (from basemap) (2.3.1)
Requirement already satisfied: matplotlib<3.9,>=1.5 in d:\anaconda app\lib\site-packages (from basemap) (3.8.4)
Requirement already satisfied: pyproj<3.7.0,>=1.9.3 in d:\anaconda app\lib\site-packages (from basemap) (3.6.1)
Requirement already satisfied: packaging<24.0,>=16.0 in d:\anaconda app\lib\site-packages (from basemap) (23.2)
Requirement already satisfied: numpy<1.27,>=1.21 in d:\anaconda app\lib\site-packages (from basemap) (1.26.4)
Requirement already satisfied: contourpy>=1.0.1 in d:\anaconda app\lib\site-packages (from matplotlib<3.9,>=1.5->basemap) (1.2.0)
Requirement already satisfied: cycler>=0.10 in d:\anaconda app\lib\site-packages (from matplotlib<3.9,>=1.5->basemap) (0.11.0)
Requirement already satisfied: fonttools>=4.22.0 in d:\anaconda app\lib\site-packages (from matplotlib<3.9,>=1.5->basemap) (4.51.0)
Requirement already satisfied: kiwisolver>=1.3.1 in d:\anaconda app\lib\site-packages (from matplotlib<3.9,>=1.5->basemap) (1.4.4)
Requirement already satisfied: pillow>=8 in d:\anaconda app\lib\site-packages (from matplotlib<3.9,>=1.5->basemap) (10.3.0)
Requirement already satisfied: pyparsing>=2.3.1 in d:\anaconda app\lib\site-packages (from matplotlib<3.9,>=1.5->basemap) (3.0.9)
Requirement already satisfied: python-dateutil>=2.7 in d:\anaconda app\lib\site-packages (from matplotlib<3.9,>=1.5->basemap) (2.9.0.post0)
Requirement already satisfied: certifi in d:\anaconda app\lib\site-packages (from pyproj<3.7.0,>=1.9.3->basemap) (2024.8.30)
Requirement already satisfied: six>=1.5 in d:\anaconda app\lib\site-packages (from python-dateutil>=2.7->matplotlib<3.9,>=1.5->basemap) (1.16.0)
Downloading basemap-1.4.1-cp312-cp312-win_amd64.whl (507 kB)
---------------------------------------- 0.0/507.6 kB ? eta -:--:--
- ------------------------------------- 20.5/507.6 kB 682.7 kB/s eta 0:00:01
--- ----------------------------------- 41.0/507.6 kB 393.8 kB/s eta 0:00:02
------- ------------------------------- 92.2/507.6 kB 655.4 kB/s eta 0:00:01
------------ ------------------------- 163.8/507.6 kB 821.4 kB/s eta 0:00:01
-------------------- ------------------- 256.0/507.6 kB 1.1 MB/s eta 0:00:01
----------------------------------- ---- 450.6/507.6 kB 1.7 MB/s eta 0:00:01
---------------------------------------- 507.6/507.6 kB 1.6 MB/s eta 0:00:00
Downloading basemap_data-1.3.2-py2.py3-none-any.whl (30.5 MB)
---------------------------------------- 0.0/30.5 MB ? eta -:--:--
---------------------------------------- 0.2/30.5 MB 5.1 MB/s eta 0:00:06
--------------------------------------- 0.6/30.5 MB 7.0 MB/s eta 0:00:05
--------------------------------------- 0.7/30.5 MB 5.1 MB/s eta 0:00:06
- -------------------------------------- 1.1/30.5 MB 5.7 MB/s eta 0:00:06
- -------------------------------------- 1.3/30.5 MB 5.6 MB/s eta 0:00:06
-- ------------------------------------- 1.7/30.5 MB 5.9 MB/s eta 0:00:05
-- ------------------------------------- 2.0/30.5 MB 6.0 MB/s eta 0:00:05
--- ------------------------------------ 2.3/30.5 MB 6.1 MB/s eta 0:00:05
--- ------------------------------------ 2.6/30.5 MB 6.2 MB/s eta 0:00:05
--- ------------------------------------ 3.0/30.5 MB 6.3 MB/s eta 0:00:05
---- ----------------------------------- 3.3/30.5 MB 6.3 MB/s eta 0:00:05
---- ----------------------------------- 3.5/30.5 MB 6.2 MB/s eta 0:00:05
---- ----------------------------------- 3.8/30.5 MB 6.1 MB/s eta 0:00:05
----- ---------------------------------- 4.1/30.5 MB 6.2 MB/s eta 0:00:05
----- ---------------------------------- 4.3/30.5 MB 6.1 MB/s eta 0:00:05
----- ---------------------------------- 4.4/30.5 MB 6.0 MB/s eta 0:00:05
------ --------------------------------- 4.6/30.5 MB 5.7 MB/s eta 0:00:05
------ --------------------------------- 4.9/30.5 MB 5.8 MB/s eta 0:00:05
------ --------------------------------- 5.3/30.5 MB 5.8 MB/s eta 0:00:05
------- -------------------------------- 5.6/30.5 MB 5.8 MB/s eta 0:00:05
------- -------------------------------- 5.9/30.5 MB 5.9 MB/s eta 0:00:05
-------- ------------------------------- 6.2/30.5 MB 5.9 MB/s eta 0:00:05
-------- ------------------------------- 6.5/30.5 MB 6.0 MB/s eta 0:00:05
-------- ------------------------------- 6.8/30.5 MB 6.0 MB/s eta 0:00:04
--------- ------------------------------ 7.1/30.5 MB 6.0 MB/s eta 0:00:04
--------- ------------------------------ 7.4/30.5 MB 6.0 MB/s eta 0:00:04
---------- ----------------------------- 7.7/30.5 MB 6.0 MB/s eta 0:00:04
---------- ----------------------------- 8.0/30.5 MB 6.0 MB/s eta 0:00:04
---------- ----------------------------- 8.3/30.5 MB 6.0 MB/s eta 0:00:04
----------- ---------------------------- 8.7/30.5 MB 6.1 MB/s eta 0:00:04
----------- ---------------------------- 9.0/30.5 MB 6.1 MB/s eta 0:00:04
------------ --------------------------- 9.3/30.5 MB 6.2 MB/s eta 0:00:04
------------ --------------------------- 9.7/30.5 MB 6.2 MB/s eta 0:00:04
------------ --------------------------- 9.9/30.5 MB 6.2 MB/s eta 0:00:04
------------- -------------------------- 10.2/30.5 MB 6.1 MB/s eta 0:00:04
------------- -------------------------- 10.5/30.5 MB 6.2 MB/s eta 0:00:04
------------- -------------------------- 10.6/30.5 MB 6.0 MB/s eta 0:00:04
-------------- ------------------------- 10.9/30.5 MB 6.1 MB/s eta 0:00:04
-------------- ------------------------- 11.2/30.5 MB 6.1 MB/s eta 0:00:04
-------------- ------------------------- 11.3/30.5 MB 6.0 MB/s eta 0:00:04
--------------- ------------------------ 11.6/30.5 MB 6.1 MB/s eta 0:00:04
--------------- ------------------------ 11.9/30.5 MB 6.0 MB/s eta 0:00:04
--------------- ------------------------ 12.0/30.5 MB 5.9 MB/s eta 0:00:04
---------------- ----------------------- 12.3/30.5 MB 5.9 MB/s eta 0:00:04
---------------- ----------------------- 12.6/30.5 MB 5.8 MB/s eta 0:00:04
---------------- ----------------------- 12.7/30.5 MB 5.7 MB/s eta 0:00:04
----------------- ---------------------- 13.0/30.5 MB 5.8 MB/s eta 0:00:04
----------------- ---------------------- 13.2/30.5 MB 5.8 MB/s eta 0:00:03
----------------- ---------------------- 13.5/30.5 MB 5.7 MB/s eta 0:00:03
------------------ --------------------- 13.8/30.5 MB 5.7 MB/s eta 0:00:03
------------------ --------------------- 14.0/30.5 MB 5.8 MB/s eta 0:00:03
------------------ --------------------- 14.2/30.5 MB 5.6 MB/s eta 0:00:03
------------------ --------------------- 14.4/30.5 MB 5.6 MB/s eta 0:00:03
------------------- -------------------- 14.6/30.5 MB 5.6 MB/s eta 0:00:03
------------------- -------------------- 14.8/30.5 MB 5.7 MB/s eta 0:00:03
------------------- -------------------- 15.2/30.5 MB 5.7 MB/s eta 0:00:03
-------------------- ------------------- 15.3/30.5 MB 5.6 MB/s eta 0:00:03
-------------------- ------------------- 15.7/30.5 MB 5.6 MB/s eta 0:00:03
-------------------- ------------------- 16.0/30.5 MB 5.6 MB/s eta 0:00:03
--------------------- ------------------ 16.1/30.5 MB 5.6 MB/s eta 0:00:03
--------------------- ------------------ 16.4/30.5 MB 5.5 MB/s eta 0:00:03
--------------------- ------------------ 16.7/30.5 MB 5.5 MB/s eta 0:00:03
--------------------- ------------------ 16.8/30.5 MB 5.5 MB/s eta 0:00:03
---------------------- ----------------- 17.1/30.5 MB 5.4 MB/s eta 0:00:03
---------------------- ----------------- 17.3/30.5 MB 5.4 MB/s eta 0:00:03
---------------------- ----------------- 17.5/30.5 MB 5.3 MB/s eta 0:00:03
----------------------- ---------------- 17.8/30.5 MB 5.3 MB/s eta 0:00:03
----------------------- ---------------- 18.0/30.5 MB 5.3 MB/s eta 0:00:03
----------------------- ---------------- 18.3/30.5 MB 5.2 MB/s eta 0:00:03
------------------------ --------------- 18.5/30.5 MB 5.2 MB/s eta 0:00:03
------------------------ --------------- 18.7/30.5 MB 5.2 MB/s eta 0:00:03
------------------------ --------------- 18.9/30.5 MB 5.1 MB/s eta 0:00:03
------------------------- -------------- 19.2/30.5 MB 5.1 MB/s eta 0:00:03
------------------------- -------------- 19.5/30.5 MB 5.1 MB/s eta 0:00:03
------------------------- -------------- 19.5/30.5 MB 5.1 MB/s eta 0:00:03
------------------------- -------------- 19.8/30.5 MB 5.0 MB/s eta 0:00:03
-------------------------- ------------- 20.1/30.5 MB 5.0 MB/s eta 0:00:03
-------------------------- ------------- 20.2/30.5 MB 5.0 MB/s eta 0:00:03
-------------------------- ------------- 20.4/30.5 MB 4.9 MB/s eta 0:00:03
--------------------------- ------------ 20.8/30.5 MB 5.0 MB/s eta 0:00:02
--------------------------- ------------ 21.0/30.5 MB 5.0 MB/s eta 0:00:02
--------------------------- ------------ 21.1/30.5 MB 4.8 MB/s eta 0:00:02
---------------------------- ----------- 21.4/30.5 MB 4.9 MB/s eta 0:00:02
---------------------------- ----------- 21.6/30.5 MB 5.0 MB/s eta 0:00:02
---------------------------- ----------- 21.7/30.5 MB 4.8 MB/s eta 0:00:02
---------------------------- ----------- 22.0/30.5 MB 4.9 MB/s eta 0:00:02
----------------------------- ---------- 22.3/30.5 MB 4.9 MB/s eta 0:00:02
----------------------------- ---------- 22.4/30.5 MB 4.9 MB/s eta 0:00:02
----------------------------- ---------- 22.7/30.5 MB 4.9 MB/s eta 0:00:02
------------------------------ --------- 23.0/30.5 MB 4.9 MB/s eta 0:00:02
------------------------------ --------- 23.1/30.5 MB 4.9 MB/s eta 0:00:02
------------------------------ --------- 23.4/30.5 MB 4.8 MB/s eta 0:00:02
------------------------------- -------- 23.7/30.5 MB 4.9 MB/s eta 0:00:02
------------------------------- -------- 23.9/30.5 MB 4.8 MB/s eta 0:00:02
------------------------------- -------- 24.0/30.5 MB 4.8 MB/s eta 0:00:02
------------------------------- -------- 24.2/30.5 MB 4.7 MB/s eta 0:00:02
------------------------------- -------- 24.3/30.5 MB 4.8 MB/s eta 0:00:02
------------------------------- -------- 24.4/30.5 MB 4.7 MB/s eta 0:00:02
-------------------------------- ------- 24.7/30.5 MB 4.7 MB/s eta 0:00:02
-------------------------------- ------- 25.1/30.5 MB 4.8 MB/s eta 0:00:02
-------------------------------- ------- 25.1/30.5 MB 4.8 MB/s eta 0:00:02
--------------------------------- ------ 25.4/30.5 MB 4.6 MB/s eta 0:00:02
--------------------------------- ------ 25.7/30.5 MB 4.7 MB/s eta 0:00:02
--------------------------------- ------ 25.8/30.5 MB 4.7 MB/s eta 0:00:02
---------------------------------- ----- 26.0/30.5 MB 4.6 MB/s eta 0:00:01
---------------------------------- ----- 26.3/30.5 MB 4.6 MB/s eta 0:00:01
---------------------------------- ----- 26.4/30.5 MB 4.7 MB/s eta 0:00:01
---------------------------------- ----- 26.6/30.5 MB 4.6 MB/s eta 0:00:01
----------------------------------- ---- 26.9/30.5 MB 4.6 MB/s eta 0:00:01
----------------------------------- ---- 27.1/30.5 MB 4.7 MB/s eta 0:00:01
----------------------------------- ---- 27.2/30.5 MB 4.6 MB/s eta 0:00:01
------------------------------------ --- 27.5/30.5 MB 4.6 MB/s eta 0:00:01
------------------------------------ --- 27.7/30.5 MB 4.6 MB/s eta 0:00:01
------------------------------------ --- 27.8/30.5 MB 4.5 MB/s eta 0:00:01
------------------------------------ --- 28.2/30.5 MB 4.5 MB/s eta 0:00:01
------------------------------------- -- 28.4/30.5 MB 4.6 MB/s eta 0:00:01
------------------------------------- -- 28.5/30.5 MB 4.5 MB/s eta 0:00:01
------------------------------------- -- 28.8/30.5 MB 4.5 MB/s eta 0:00:01
-------------------------------------- - 29.0/30.5 MB 4.6 MB/s eta 0:00:01
-------------------------------------- - 29.1/30.5 MB 4.5 MB/s eta 0:00:01
-------------------------------------- - 29.4/30.5 MB 4.5 MB/s eta 0:00:01
-------------------------------------- - 29.7/30.5 MB 4.5 MB/s eta 0:00:01
-------------------------------------- - 29.7/30.5 MB 4.4 MB/s eta 0:00:01
--------------------------------------- 30.0/30.5 MB 4.5 MB/s eta 0:00:01
--------------------------------------- 30.3/30.5 MB 4.5 MB/s eta 0:00:01
--------------------------------------- 30.3/30.5 MB 4.5 MB/s eta 0:00:01
--------------------------------------- 30.5/30.5 MB 4.5 MB/s eta 0:00:01
--------------------------------------- 30.5/30.5 MB 4.5 MB/s eta 0:00:01
--------------------------------------- 30.5/30.5 MB 4.5 MB/s eta 0:00:01
---------------------------------------- 30.5/30.5 MB 4.3 MB/s eta 0:00:00
Installing collected packages: basemap-data, basemap
Successfully installed basemap-1.4.1 basemap-data-1.3.2
2D Spherical Globe¶
In [45]:
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
import numpy as np
def draw_globe():
# Create a new figure
fig = plt.figure(figsize=(10, 10))
# Create a Basemap instance for a globe projection
m = Basemap(projection='ortho', lat_0=0, lon_0=0)
# Draw coastlines, countries, and the edges of the map
m.drawcoastlines()
m.drawcountries()
m.drawmapboundary(fill_color='aqua') # Color for oceans
m.fillcontinents(color='lightgreen', lake_color='aqua')
# Add a title
plt.title('Spherical Movable Globe', fontsize=20)
# Save the figure to the specified path
plt.savefig("E:\\spherical_globe.png", bbox_inches='tight', dpi=300)
# Display the globe
plt.show()
# Call the function to draw the globe
draw_globe()
3D Spherical Globe¶
In [2]:
import plotly.graph_objects as go
import pandas as pd
# Load country data
url = "https://raw.githubusercontent.com/plotly/datasets/master/2014_world_gdp_with_codes.csv"
df = pd.read_csv(url)
# Inspect the DataFrame to check the column names
print(df.head()) # Print the first few rows of the DataFrame for verification
# Check the columns in the DataFrame
print("Columns in the DataFrame:", df.columns)
# Example mapping of countries to coordinates (for demonstration)
coordinates = {
'United States': (-95.71, 37.09),
'China': (104.19, 35.86),
'India': (78.96, 20.59),
'Brazil': (-51.93, -14.24),
'Australia': (133.77, -25.27),
# Add more countries and their coordinates as needed
}
# Create Longitude and Latitude columns using the mapping
df['Longitude'] = df['COUNTRY'].map(lambda x: coordinates.get(x, (None, None))[0])
df['Latitude'] = df['COUNTRY'].map(lambda x: coordinates.get(x, (None, None))[1])
# Create a scatter plot for the globe
fig = go.Figure()
# Add a globe surface
fig.add_trace(go.Scattergeo(
lon=df['Longitude'],
lat=df['Latitude'],
text=df['COUNTRY'],
mode='markers',
marker=dict(size=6, color='blue', opacity=0.7),
))
# Customize layout for the globe
fig.update_layout(
title='Interactive 3D Earth',
geo=dict(
projection_type='orthographic',
showland=True,
landcolor='lightgreen',
subunitcolor='white',
countrycolor='white',
showocean=True,
oceancolor='aqua',
showcountries=True,
),
updatemenus=[
{
'buttons': [
{
'label': 'Zoom In',
'method': 'relayout',
'args': ['geo.projection.scale', 1.2] # Increase scale for zooming in
},
{
'label': 'Zoom Out',
'method': 'relayout',
'args': ['geo.projection.scale', 0.8] # Decrease scale for zooming out
}
],
'direction': 'down',
'showactive': True,
'x': 0.1,
'xanchor': 'left',
'y': 1.1,
'yanchor': 'top'
}
],
width=1400, # Increase width to triple the size
height=1400, # Increase height to triple the size
)
# Function to rotate the globe to the specified country
def rotate_globe(country_name):
# Get the coordinates of the specified country
country_data = df[df['COUNTRY'] == country_name]
if not country_data.empty:
lon = country_data['Longitude'].values[0]
lat = country_data['Latitude'].values[0]
# Update camera position to focus on the specified country
fig.update_layout(
geo=dict(
projection_type='orthographic',
center=dict(lat=lat, lon=lon),
)
)
fig.show()
else:
print("Country not found. Please enter a valid country name.")
# Display the globe
fig.show()
COUNTRY GDP (BILLIONS) CODE 0 Afghanistan 21.71 AFG 1 Albania 13.40 ALB 2 Algeria 227.80 DZA 3 American Samoa 0.75 ASM 4 Andorra 4.80 AND Columns in the DataFrame: Index(['COUNTRY', 'GDP (BILLIONS)', 'CODE'], dtype='object')
In [3]:
import plotly.graph_objects as go
import pandas as pd
from geopy.geocoders import Nominatim
import time
# Load country data
url = "https://raw.githubusercontent.com/plotly/datasets/master/2014_world_gdp_with_codes.csv"
df = pd.read_csv(url)
# Inspect the DataFrame to check the column names
print(df.head()) # Print the first few rows of the DataFrame for verification
# Check the columns in the DataFrame
print("Columns in the DataFrame:", df.columns)
# Initialize geocoder
geolocator = Nominatim(user_agent="geoapiExercises")
# Function to get coordinates for a country
def get_coordinates(country_name):
try:
location = geolocator.geocode(country_name)
return (location.longitude, location.latitude) if location else (None, None)
except Exception as e:
print(f"Error getting coordinates for {country_name}: {e}")
return (None, None)
# Get coordinates for each country in the DataFrame
coordinates = df['COUNTRY'].apply(get_coordinates)
df['Longitude'] = coordinates.apply(lambda x: x[0])
df['Latitude'] = coordinates.apply(lambda x: x[1])
# Add a small delay to avoid overwhelming the geocoding service
time.sleep(1)
# Create a scatter plot for the globe
fig = go.Figure()
# Add a globe surface with country names
fig.add_trace(go.Scattergeo(
lon=df['Longitude'],
lat=df['Latitude'],
text=df['COUNTRY'],
mode='text', # Use 'text' mode to display country names
textfont=dict(size=8, color='blue'),
))
# Customize layout for the globe
fig.update_layout(
title='Interactive 3D Earth with Country Names',
geo=dict(
projection_type='orthographic',
showland=True,
landcolor='lightgreen',
subunitcolor='white',
countrycolor='white',
showocean=True,
oceancolor='aqua',
showcountries=True,
),
updatemenus=[
{
'buttons': [
{
'label': 'Zoom In',
'method': 'relayout',
'args': ['geo.projection.scale', 1.2] # Increase scale for zooming in
},
{
'label': 'Zoom Out',
'method': 'relayout',
'args': ['geo.projection.scale', 0.8] # Decrease scale for zooming out
}
],
'direction': 'down',
'showactive': True,
'x': 0.1,
'xanchor': 'left',
'y': 1.1,
'yanchor': 'top'
}
],
width=1400, # Increase width to triple the size
height=1400, # Increase height to triple the size
)
# Function to rotate the globe to the specified country
def rotate_globe(country_name):
# Get the coordinates of the specified country
country_data = df[df['COUNTRY'] == country_name]
if not country_data.empty:
lon = country_data['Longitude'].values[0]
lat = country_data['Latitude'].values[0]
# Update camera position to focus on the specified country
fig.update_layout(
geo=dict(
projection_type='orthographic',
center=dict(lat=lat, lon=lon),
)
)
fig.show()
else:
print("Country not found. Please enter a valid country name.")
# Display the globe
fig.show()
COUNTRY GDP (BILLIONS) CODE 0 Afghanistan 21.71 AFG 1 Albania 13.40 ALB 2 Algeria 227.80 DZA 3 American Samoa 0.75 ASM 4 Andorra 4.80 AND Columns in the DataFrame: Index(['COUNTRY', 'GDP (BILLIONS)', 'CODE'], dtype='object')
In [ ]:
Choropleth Map Visualization¶
In [12]:
import folium
import pandas as pd
import json
import requests
# URL for the GeoJSON file
geojson_url = 'https://raw.githubusercontent.com/datasets/geo-boundaries-world-110m/master/countries.geojson'
# Download the GeoJSON file
response = requests.get(geojson_url)
with open('E:/world_countries.geojson', 'wb') as f:
f.write(response.content)
# Load conflict data - Example DataFrame
# This should be replaced with your actual conflict data loading
data = {
'Country': ['USA', 'India', 'China', 'Russia', 'Israel'],
'UN_Interventions': [1, 3, 2, 2, 2] # Numeric values for conflict intensity
}
conflict_data = pd.DataFrame(data)
# Load geo data from the downloaded GeoJSON file
with open('E:/world_countries.geojson') as f:
geo_data = json.load(f)
# Create a map centered at a given location
world_map = folium.Map(location=[20, 0], zoom_start=2)
# Create a choropleth map
folium.Choropleth(
geo_data=geo_data,
data=conflict_data,
columns=['Country', 'UN_Interventions'], # Ensure these columns exist in conflict_data
key_on='feature.properties.name', # Adjust key_on based on your geo_data structure
fill_color='YlOrRd',
legend_name='UN_Interventions' # Updated legend name
).add_to(world_map)
# Display the map
world_map
Out[12]:
Make this Notebook Trusted to load map: File -> Trust Notebook
Bubble Map Visualization¶
In [6]:
import plotly.express as px
# Sample DataFrame (replace 'df' with your actual DataFrame containing 'Country' and 'Conflict_Intensity')
data = {
'Country': ['India', 'United States', 'China', 'Russia', 'Germany'],
'Conflict_Intensity': [70, 30, 50, 40, 25]
}
import pandas as pd
df = pd.DataFrame(data)
# Create a scatter_geo plot with bubble sizes based on conflict intensity
fig = px.scatter_geo(df, locations="Country", locationmode='country names',
size="Conflict_Intensity", projection="natural earth",
size_max=50) # Increase or decrease the value to control bubble size
# Set the width and height of the bubble map
fig.update_layout(width=1000, height=800) # Adjust these values as needed
# Show the plot
fig.show()
Clustering - KMeans for Conflict Classification¶
In [3]:
# Import necessary libraries
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import KMeans
# 1. Scaling the data using StandardScaler (you can use MinMaxScaler if needed)
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X) # Scale the features
# 2. Applying KMeans clustering
kmeans = KMeans(n_clusters=5)
clusters = kmeans.fit_predict(X_scaled)
# 3. Adding the cluster labels to the DataFrame
df['Cluster'] = clusters
# Display the DataFrame with clusters
print(df)
Feature1 Feature2 Cluster 0 5.1 3.5 1 1 4.9 3.0 0 2 4.7 3.2 4 3 4.6 3.1 4 4 5.0 3.6 1 5 5.4 3.9 3 6 4.6 3.4 2 7 5.0 3.4 1
D:\Anaconda app\Lib\site-packages\sklearn\cluster\_kmeans.py:1446: UserWarning: KMeans is known to have a memory leak on Windows with MKL, when there are less chunks than available threads. You can avoid it by setting the environment variable OMP_NUM_THREADS=1. warnings.warn(
Dimensionality Reduction using PCA¶
In [8]:
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
# Assuming y_train is a simple target variable (e.g., binary classification)
y_train = np.array([0, 1, 0, 1, 0])
# Scaling the data
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(df)
# Applying PCA for dimensionality reduction
pca = PCA(n_components=2)
X_pca = pca.fit_transform(X_train_scaled)
# Visualizing the PCA components
plt.figure(figsize=(8, 6))
plt.scatter(X_pca[:, 0], X_pca[:, 1], c=y_train, cmap='viridis') # Color by target (y_train)
plt.title("PCA on Conflict Data")
plt.xlabel("Principal Component 1")
plt.ylabel("Principal Component 2")
plt.colorbar()
plt.show()
Anomaly Detection using Isolation Forest¶
In [7]:
import numpy as np
import pandas as pd
from sklearn.ensemble import IsolationForest
import seaborn as sns
import matplotlib.pyplot as plt
# Sample data for conflict mapping
data = {
'Country': ['USA', 'Russia', 'India', 'China', 'Ukraine', 'Israel', 'Palestine', 'France', 'Germany', 'Pakistan', 'Taiwan'],
'Conflict_Region': ['Mid North America', 'Eastern Europe', 'South Asia', 'East Asia', 'Eastern Europe', 'Middle East', 'Middle East', 'Western Europe', 'Western Europe', 'South West Asia', 'East Asia'],
'Conflict_Type': ['Tension', 'War', 'Tension', 'Potential Conflict', 'War', 'War', 'War', 'Potential Conflict', 'Tension', 'Extreme Tension', 'Potential Conflict'],
'Latitude': [37.0902, 61.5240, 20.5937, 35.8617, 48.3794, 31.0461, 31.9522, 46.6034, 51.1657, 30.3753, 23.6978],
'Longitude': [-95.7129, 105.3188, 78.9629, 104.1954, 31.1656, 34.8516, 35.2332, 1.8883, 10.4515, 69.3451, 121.0200],
'Altitude': [760, 600, 160, 1840, 175, 508, 795, 375, 263, 900, 1150],
'Conflict_Intensity': [10, 20, 30, 25, 15, 18, 5, 12, 22, 16, 14],
'Deaths': np.random.randint(1000, 50000, size=11),
'Economic_Impact_Billion': np.random.uniform(1.5, 100, size=11),
'Environmental_Damage_Index': np.random.uniform(1, 10, size=11),
'UN_Interventions': np.random.choice([1, 2, 3, 4], size=11),
'Total_Population': [331002651, 145912025, 1380004385, 1439323776, 43733762, 8655535, 5000000, 65273511, 83783942, 225199937, 23816775],
'Male_Population': [162000000, 67000000, 705000000, 724000000, 22000000, 4300000, 2500000, 32000000, 41000000, 113000000, 12000000],
'Female_Population': [169000000, 78900000, 675000000, 715000000, 21700000, 4350000, 2500000, 33200000, 42700000, 112000000, 11800000]
}
df = pd.DataFrame(data)
# Features for anomaly detection
features = ['Conflict_Intensity', 'Deaths', 'Economic_Impact_Billion', 'Environmental_Damage_Index', 'UN_Interventions', 'Altitude']
X = df[features]
# Initialize and fit the Isolation Forest
isolation_forest = IsolationForest(contamination=0.1, random_state=42)
df['Anomaly'] = isolation_forest.fit_predict(X)
# Print anomalies
anomalies = df[df['Anomaly'] == -1]
print(f"Anomalies detected:\n{anomalies}")
# Create a Swarm Plot to visualize Conflict Intensity vs Region
plt.figure(figsize=(12, 6))
sns.swarmplot(x='Conflict_Region', y='Conflict_Intensity', data=df, hue='Anomaly', palette='Set2')
# Add labels and title
plt.title('Swarm Plot of Conflict Intensity by Region (with Anomalies)', fontsize=16)
plt.xlabel('Conflict Region', fontsize=12)
plt.ylabel('Conflict Intensity', fontsize=12)
# Show the plot
plt.show()
Anomalies detected: Country Conflict_Region Conflict_Type Latitude Longitude Altitude \ 3 China East Asia Potential Conflict 35.8617 104.1954 1840 Conflict_Intensity Deaths Economic_Impact_Billion \ 3 25 30969 48.758461 Environmental_Damage_Index UN_Interventions Total_Population \ 3 8.675396 2 1439323776 Male_Population Female_Population Anomaly 3 724000000 715000000 -1
Gantt Chart for Conflict Timelines¶
In [6]:
import plotly.figure_factory as ff
df = [dict(Task='Conflict 1', Start='2022-01-01', Finish='2022-12-31'),
dict(Task='Conflict 2', Start='2021-05-15', Finish='2022-09-30')]
fig = ff.create_gantt(df)
fig.show()
Feature Importance using XGBoost¶
In [8]:
# Import necessary libraries
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_iris # Using Iris dataset as an example
import xgboost as xgb
import matplotlib.pyplot as plt
# Load the dataset (you can replace this with your own dataset)
iris = load_iris()
X = iris.data # Features
y = iris.target # Target
# 1. Split the data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
# 2. Initialize and train the XGBoost classifier
xgb_model = xgb.XGBClassifier()
xgb_model.fit(X_train, y_train)
# 3. Plot feature importance
xgb.plot_importance(xgb_model)
plt.show()
In [4]:
import xgboost as xgb
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
from sklearn.datasets import make_classification
import pandas as pd
# Creating a synthetic dataset with 1000 samples and 20 features for binary classification
X, y = make_classification(n_samples=1000, n_features=20, n_informative=15,
n_redundant=5, n_classes=2, random_state=42)
# Splitting the data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
# Feature scaling
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)
# Building the XGBoost Model
xgboost_model = xgb.XGBClassifier(objective='binary:logistic', n_estimators=100, seed=42)
xgboost_model.fit(X_train_scaled, y_train)
# Predictions
y_pred_xgb = xgboost_model.predict(X_test_scaled)
# Evaluating the model
print("Accuracy:", accuracy_score(y_test, y_pred_xgb))
print("Classification Report:\n", classification_report(y_test, y_pred_xgb))
print("Confusion Matrix:\n", confusion_matrix(y_test, y_pred_xgb))
Accuracy: 0.915
Classification Report:
precision recall f1-score support
0 0.92 0.92 0.92 106
1 0.91 0.90 0.91 94
accuracy 0.92 200
macro avg 0.91 0.91 0.91 200
weighted avg 0.91 0.92 0.91 200
Confusion Matrix:
[[98 8]
[ 9 85]]
Ternary Plot for Visualizing Multiclass Conflict Data¶
In [6]:
import plotly.express as px
import pandas as pd
# Example: Create a DataFrame (replace this with your actual DataFrame)
data = {
'India': [0.2, 0.3, 0.1, 0.4],
'Russia': [0.3, 0.1, 0.4, 0.2],
'China': [0.5, 0.6, 0.5, 0.4],
'Conflict_Intensity': [10, 20, 15, 25]
}
df = pd.DataFrame(data)
# Verify column names (check if "India", "Russia", "Pakistan" exist)
print(df.columns)
# Create a scatter ternary plot
fig = px.scatter_ternary(df, a="India", b="Russia", c="China", size="Conflict_Intensity")
fig.show()
Index(['India', 'Russia', 'China', 'Conflict_Intensity'], dtype='object')
Sankey Diagram for Conflict Flow¶
In [11]:
import pandas as pd
import numpy as np
import plotly.graph_objects as go
import matplotlib.pyplot as plt
import seaborn as sns
# Define the dataset
data = {
'Country': ['USA', 'Russia', 'India', 'China', 'Ukraine', 'Israel', 'Palestine', 'France', 'Germany', 'Pakistan', 'Taiwan'],
'Conflict_Region': ['Mid North America', 'Eastern Europe', 'South Asia', 'East Asia', 'Eastern Europe',
'Middle East', 'Middle East', 'Western Europe', 'Western Europe', 'South West Asia', 'East Asia'],
'Conflict_Type': ['Tension', 'War', 'Tension', 'Potential Conflict', 'War', 'War', 'War',
'Potential Conflict', 'Tension', 'Extreme Tension', 'Potential Conflict'],
'Latitude': [37.0902, 61.5240, 20.5937, 35.8617, 48.3794, 31.0461, 31.9522, 46.6034, 51.1657, 30.3753, 23.6978],
'Longitude': [-95.7129, 105.3188, 78.9629, 104.1954, 31.1656, 34.8516, 35.2332, 1.8883, 10.4515, 69.3451, 121.0200],
'Altitude': [760, 600, 160, 1840, 175, 508, 795, 375, 263, 900, 1150],
'Conflict_Intensity': [10, 20, 30, 25, 15, 18, 5, 12, 22, 16, 14],
'Deaths': np.random.randint(1000, 50000, size=11),
'Economic_Impact_Billion': np.random.uniform(1.5, 100, size=11),
'Environmental_Damage_Index': np.random.uniform(1, 10, size=11),
'UN_Interventions': np.random.choice([1, 2, 3, 4], size=11),
'Total_Population': [331002651, 145912025, 1380004385, 1439323776, 43733762, 8655535, 5000000,
65273511, 83783942, 225199937, 23816775],
'Male_Population': [162000000, 67000000, 705000000, 724000000, 22000000, 4300000, 2500000,
32000000, 41000000, 113000000, 12000000],
'Female_Population': [169000000, 78900000, 675000000, 715000000, 21700000, 4350000, 2500000,
33200000, 42700000, 112000000, 11800000]
}
# Create a DataFrame
df = pd.DataFrame(data)
# Create a source-target mapping for Sankey Diagram
sources = df['Conflict_Type']
targets = df['Conflict_Region']
values = df['Conflict_Intensity']
# Create unique labels for the Sankey diagram
all_labels = pd.concat([sources, targets]).unique()
source_indices = [list(all_labels).index(s) for s in sources]
target_indices = [list(all_labels).index(t) + len(sources) for t in targets]
# Initialize the Sankey Diagram
fig = go.Figure(data=[go.Sankey(
node=dict(
pad=15,
thickness=20,
line=dict(color='black', width=0.5),
label=list(all_labels),
color='blue'
),
link=dict(
source=source_indices,
target=target_indices,
value=values
)
)])
# Update layout with custom size
fig.update_layout(
title_text="Sankey Diagram for Conflict Flow",
font_size=20,
width=1000, # Adjust width as needed
height=600 # Adjust height as needed
)
fig.show()
Heatmap for Correlation Analysis¶
In [4]:
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
# Sample data to create a DataFrame
data = {
'Feature1': [1, 2, 3, 4, 5],
'Feature2': [5, 4, 3, 2, 1],
'Feature3': [2, 3, 4, 5, 6],
'Feature4': [10, 9, 8, 7, 6]
}
# Create a DataFrame
df = pd.DataFrame(data)
# Correlation Heatmap
plt.figure(figsize=(10, 8))
sns.heatmap(df.corr(), annot=True, cmap="coolwarm") # Creating the heatmap
plt.title('Heatmap of Feature Correlations') # Adding a title
plt.show() # Display the plot
Radar Chart for Conflict Severity¶
In [11]:
from math import pi
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from sklearn.preprocessing import LabelEncoder
# Example DataFrame structure (replace this with actual data)
data = {
'Country': ['USA', 'Russia', 'India', 'China', 'Ukraine', 'Israel', 'Palestine', 'France', 'Germany', 'Pakistan', 'Taiwan'],
'Conflict_Region': ['Mid North America', 'Eastern Europe', 'South Asia', 'East Asia', 'Eastern Europe', 'Middle East', 'Middle East', 'Western Europe', 'Western Europe', 'South West Asia', 'East Asia'],
'Conflict_Type': ['Tension', 'War', 'Tension', 'Potential Conflict', 'War', 'War', 'War', 'Potential Conflict', 'Tension', 'Extreme Tension', 'Potential Conflict'],
'Latitude': [37.0902, 61.5240, 20.5937, 35.8617, 48.3794, 31.0461, 31.9522, 46.6034, 51.1657, 30.3753, 23.6978],
'Longitude': [-95.7129, 105.3188, 78.9629, 104.1954, 31.1656, 34.8516, 35.2332, 1.8883, 10.4515, 69.3451, 121.0200],
'Altitude': [760, 600, 160, 1840, 175, 508, 795, 375, 263, 900, 1150], # Corrected closing bracket for altitude
}
df = pd.DataFrame(data)
# Encoding the categorical columns 'Conflict_Region' and 'Conflict_Type'
label_encoder = LabelEncoder()
df['Conflict_Region'] = label_encoder.fit_transform(df['Conflict_Region'])
df['Conflict_Type'] = label_encoder.fit_transform(df['Conflict_Type'])
# Now using the correct column names
categories = ['Conflict_Region', 'Conflict_Type', 'Latitude', 'Longitude'] # No 'Country' for mean calculation
# Preparing the data for radar chart
conflict_data = df[categories].mean().values
# Number of variables
N = len(categories)
# Radar chart angles
angles = [n / float(N) * 2 * pi for n in range(N)]
angles += angles[:1]
conflict_data = np.concatenate([conflict_data, [conflict_data[0]]])
plt.figure(figsize=(6, 6))
ax = plt.subplot(111, polar=True)
plt.xticks(angles[:-1], categories)
# Plot data
ax.plot(angles, conflict_data, linewidth=2, linestyle='solid')
ax.fill(angles, conflict_data, 'b', alpha=0.4)
plt.title("Radar Chart for Conflict Severity")
plt.show()
WordCloud for Textual Analysis¶
In [7]:
import pandas as pd
from wordcloud import WordCloud
import matplotlib.pyplot as plt
# Sample DataFrame with exact conflict types and related UN report texts
data = {'Conflict_Type': ['Syrian Civil War', 'Yemeni Civil War', 'Sudan Conflict', 'Rohingya Crisis',
'Libyan Civil War', 'Afghanistan Conflict', 'Israeli–Palestinian Conflict',
'Ukraine Conflict', 'Somali Civil War', 'Ethiopian Civil Conflict',
'Kashmir Conflict']}
df = pd.DataFrame(data)
# Actual brief UN report texts related to each conflict (replace with accurate summaries)
un_reports = [
"The Syrian conflict has resulted in over 13 million people in need of humanitarian assistance.",
"The Yemeni conflict has led to the world’s worst humanitarian crisis, with millions facing famine.",
"The Sudan conflict has displaced millions, causing severe food insecurity and humanitarian concerns.",
"UN officials report that the Rohingya crisis has displaced hundreds of thousands from Myanmar.",
"Libya remains in turmoil post-Gaddafi, with rival factions and international involvement prolonging the conflict.",
"Years of fighting in Afghanistan have left millions displaced and thousands of civilian casualties.",
"The Israeli-Palestinian conflict continues to result in high casualties and deepening humanitarian crises.",
"The Ukraine conflict has caused thousands of deaths and displaced millions, creating a large refugee crisis.",
"Somalia remains embroiled in conflict, with the UN reporting widespread hunger and displacement.",
"The Ethiopian civil conflict has led to mass displacement and a deepening humanitarian emergency.",
"The Kashmir conflict between India and Pakistan continues to cause tensions, with significant human rights concerns."
]
# Ensure the UN report texts match the number of rows in the DataFrame
df['UN_report'] = un_reports
# Now, generate the WordCloud
text = " ".join(str(report) for report in df['UN_report'])
wordcloud = WordCloud(width=800, height=400, background_color='white').generate(text)
plt.figure(figsize=(10, 5))
plt.imshow(wordcloud, interpolation='bilinear')
plt.axis('off')
plt.title("WordCloud of UN Reports on Global Conflicts")
plt.show()
Time Series Forecasting with ARIMA¶
In [22]:
import pandas as pd
import statsmodels.api as sm
import matplotlib.pyplot as plt
# Sample DataFrame (adjust this to your actual data)
data = {
'Country': ['USA', 'Russia', 'India', 'China', 'Ukraine', 'Israel', 'Palestine', 'France', 'Germany', 'Pakistan', 'Taiwan'],
'conflict_intensity': [1, 2, 3, 4, 5, 6, 5, 4, 3, 2, 1] # Sample conflict intensity data
}
df = pd.DataFrame(data)
# Fitting an ARIMA model to forecast conflict intensity over time
conflict_time_series = df.set_index('Country')['conflict_intensity']
arima_model = sm.tsa.ARIMA(conflict_time_series, order=(5, 1, 0))
arima_results = arima_model.fit()
# Forecasting
forecast = arima_results.forecast(steps=10)
# Adjust the plot size here
plt.figure(figsize=(18, 6)) # Change width (12) and height (6) as needed
plt.plot(conflict_time_series, label="Observed")
plt.plot(forecast, label="Forecast", color="red")
plt.title("ARIMA Forecast of Conflict Intensity")
plt.legend()
plt.show()
D:\Anaconda app\Lib\site-packages\statsmodels\tsa\base\tsa_model.py:559: UserWarning: Could not infer format, so each element will be parsed individually, falling back to `dateutil`. To ensure parsing is consistent and as-expected, please specify a format.
_index = to_datetime(index)
D:\Anaconda app\Lib\site-packages\statsmodels\tsa\base\tsa_model.py:473: ValueWarning: An unsupported index was provided and will be ignored when e.g. forecasting.
self._init_dates(dates, freq)
D:\Anaconda app\Lib\site-packages\statsmodels\tsa\base\tsa_model.py:559: UserWarning: Could not infer format, so each element will be parsed individually, falling back to `dateutil`. To ensure parsing is consistent and as-expected, please specify a format.
_index = to_datetime(index)
D:\Anaconda app\Lib\site-packages\statsmodels\tsa\base\tsa_model.py:473: ValueWarning: An unsupported index was provided and will be ignored when e.g. forecasting.
self._init_dates(dates, freq)
D:\Anaconda app\Lib\site-packages\statsmodels\tsa\base\tsa_model.py:559: UserWarning: Could not infer format, so each element will be parsed individually, falling back to `dateutil`. To ensure parsing is consistent and as-expected, please specify a format.
_index = to_datetime(index)
D:\Anaconda app\Lib\site-packages\statsmodels\tsa\base\tsa_model.py:473: ValueWarning: An unsupported index was provided and will be ignored when e.g. forecasting.
self._init_dates(dates, freq)
D:\Anaconda app\Lib\site-packages\statsmodels\tsa\statespace\sarimax.py:966: UserWarning: Non-stationary starting autoregressive parameters found. Using zeros as starting parameters.
warn('Non-stationary starting autoregressive parameters'
D:\Anaconda app\Lib\site-packages\statsmodels\base\model.py:607: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals
warnings.warn("Maximum Likelihood optimization failed to "
D:\Anaconda app\Lib\site-packages\statsmodels\tsa\base\tsa_model.py:836: ValueWarning: No supported index is available. Prediction results will be given with an integer index beginning at `start`.
return get_prediction_index(
D:\Anaconda app\Lib\site-packages\statsmodels\tsa\base\tsa_model.py:836: FutureWarning: No supported index is available. In the next version, calling this method in a model without a supported index will result in an exception.
return get_prediction_index(
Sunburst Chart for Conflict Types¶
In [46]:
import pandas as pd
import numpy as np
import plotly.express as px # Importing plotly express as px
# Sample DataFrame (adjust this to your actual data)
data = {
'Country': ['USA', 'Russia', 'India', 'China', 'Ukraine', 'Israel', 'Palestine', 'France', 'Germany', 'Pakistan', 'Taiwan'],
'Conflict_Region': ['Mid North America', 'Eastern Europe', 'South Asia', 'East Asia', 'Eastern Europe', 'Middle East', 'Middle East', 'Western Europe', 'Western Europe', 'South West Asia', 'East Asia'],
'Conflict_Type': ['Tension', 'War', 'Tension', 'Potential Conflict', 'War', 'War', 'War', 'Potential Conflict', 'Tension', 'Extreme Tension', 'Potential Conflict'],
'Latitude': [37.0902, 61.5240, 20.5937, 35.8617, 48.3794, 31.0461, 31.9522, 46.6034, 51.1657, 30.3753, 23.6978],
'Longitude': [-95.7129, 105.3188, 78.9629, 104.1954, 31.1656, 34.8516, 35.2332, 1.8883, 10.4515, 69.3451, 121.0200],
'Altitude': [760, 600, 160, 1840, 175, 508, 795, 375, 263, 900, 1150],
'Conflict_Intensity': [10, 20, 30, 25, 15, 18, 5, 12, 22, 16, 14],
'Deaths': np.random.randint(1000, 50000, size=11), # Adjusted to 11 entries
'Economic_Impact_Billion': np.random.uniform(1.5, 100, size=11), # Adjusted to 11 entries
'Environmental_Damage_Index': np.random.uniform(1, 10, size=11), # Adjusted to 11 entries
'UN_Interventions': np.random.choice([1, 2, 3, 4], size=11), # Adjusted to 11 entries
'Total_Population': [331002651, 145912025, 1380004385, 1439323776, 43733762, 8655535, 5000000, 65273511, 83783942, 225199937, 23816775],
'Male_Population': [162000000, 67000000, 705000000, 724000000, 22000000, 4300000, 2500000, 32000000, 41000000, 113000000, 12000000],
'Female_Population': [169000000, 78900000, 675000000, 715000000, 21700000, 4350000, 2500000, 33200000, 42700000, 112000000, 11800000]
}
df = pd.DataFrame(data)
# Sunburst chart to show hierarchical breakdown of conflict types by country and region
fig = px.sunburst(df, path=['Conflict_Region', 'Country', 'Conflict_Type'], values='Conflict_Intensity') # Corrected column names
# Set the size of the chart
fig.update_layout(
title="Sunburst Chart of Conflict Types by Region and Country",
width=800, # Set desired width in pixels
height=600 # Set desired height in pixels
)
fig.show()
Sankey Diagram for Conflict Flows¶
In [26]:
import plotly.graph_objects as go
# Sankey Diagram representing conflict flows between countries
fig = go.Figure(go.Sankey(
node = dict(
label = ["USA", "Russia", "Ukraine", "UN", "EU"],
pad = 15, thickness = 20
),
link = dict(
source = [0, 1, 1, 2], # Indices of the source nodes
target = [2, 2, 3, 4], # Indices of the target nodes
value = [8, 4, 2, 6] # Value for each link
)
))
fig.update_layout(title="Sankey Diagram of Conflict Flows")
fig.show()
Voronoi Diagram for Regional Conflict Intensity¶
In [28]:
from scipy.spatial import Voronoi, voronoi_plot_2d
import matplotlib.pyplot as plt # Importing matplotlib for plotting
# Ensure that the points are correctly referenced using the right column names
points = df[['Latitude', 'Longitude']].values # Use 'Latitude' and 'Longitude' with capital 'L'
# Create the Voronoi diagram
vor = Voronoi(points)
# Plot the Voronoi diagram
voronoi_plot_2d(vor)
plt.title("Voronoi Diagram of Regional Conflict Intensity")
plt.show()
Hexbin Map for Geopolitical Conflicts¶
In [30]:
# Hexbin map showing density of conflicts
plt.figure(figsize=(10, 6))
plt.hexbin(df['Longitude'], df['Latitude'], gridsize=50, cmap='coolwarm', reduce_C_function=np.mean)
plt.colorbar(label="Conflict Intensity")
plt.title("Hexbin Map of Geopolitical Conflicts")
plt.show()
Contour Plot for Conflict Severity¶
In [33]:
# Contour plot for visualizing conflict severity
plt.figure(figsize=(10, 6))
plt.tricontourf(df['Longitude'], df['Latitude'], df['Conflict_Intensity'], cmap="RdYlBu")
plt.colorbar(label="Conflict Severity")
plt.title("Contour Plot of Conflict Severity by Region")
plt.show()
Box Plot for Economic Impact of Conflicts¶
In [36]:
import matplotlib.pyplot as plt
import seaborn as sns
# Box plot of economic impact of conflicts across different regions
plt.figure(figsize=(12, 8)) # Increase the size by adjusting the values (width, height)
sns.boxplot(x='Conflict_Region', y='Economic_Impact_Billion', data=df)
plt.title("Box Plot of Economic Impact of Conflicts by Region")
plt.xticks(rotation=45) # Optional: Rotate x-axis labels for better visibility
plt.show()
Population Pyramid for Affected Areas¶
In [14]:
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
# Sample Data
data = {
'Country': ['USA', 'Russia', 'India', 'China', 'Ukraine', 'Israel', 'Palestine', 'France', 'Germany', 'Pakistan', 'Taiwan'],
'Conflict_Region': ['Mid North America', 'Eastern Europe', 'South Asia', 'East Asia', 'Eastern Europe', 'Middle East', 'Middle East', 'Western Europe', 'Western Europe', 'South West Asia', 'East Asia'],
'Conflict_Type': ['Tension', 'War', 'Tension', 'Potential Conflict', 'War', 'War', 'War', 'Potential Conflict', 'Tension', 'Extreme Tension', 'Potential Conflict'],
'Latitude': [37.0902, 61.5240, 20.5937, 35.8617, 48.3794, 31.0461, 31.9522, 46.6034, 51.1657, 30.3753, 23.6978],
'Longitude': [-95.7129, 105.3188, 78.9629, 104.1954, 31.1656, 34.8516, 35.2332, 1.8883, 10.4515, 69.3451, 121.0200],
'Altitude': [760, 600, 160, 1840, 175, 508, 795, 375, 263, 900, 1150],
'Conflict_Intensity': [10, 20, 30, 25, 15, 18, 5, 12, 22, 16, 14],
'Deaths': np.random.randint(1000, 50000, size=11), # Adjusted to 11 entries
'Economic_Impact_Billion': np.random.uniform(1.5, 100, size=11), # Adjusted to 11 entries
'Environmental_Damage_Index': np.random.uniform(1, 10, size=11), # Adjusted to 11 entries
'UN_Interventions': np.random.choice([1, 2, 3, 4], size=11), # Adjusted to 11 entries
'Total_Population': [331002651, 145912025, 1380004385, 1439323776, 43733762, 8655535, 5000000, 65273511, 83783942, 225199937, 23816775],
'Male_Population': [162000000, 67000000, 705000000, 724000000, 22000000, 4300000, 2500000, 32000000, 41000000, 113000000, 12000000],
'Female_Population': [169000000, 78900000, 675000000, 715000000, 21700000, 4350000, 2500000, 33200000, 42700000, 112000000, 11800000]
}
# Create DataFrame
df = pd.DataFrame(data)
# Plotting Population Pyramid with a new design
def plot_population_pyramid(df):
fig, ax = plt.subplots(figsize=(12, 8))
# Bar plot for male and female population with new colors
countries = df['Country']
male_population = df['Male_Population']
female_population = df['Female_Population']
# Using a new color palette for males and females
ax.barh(countries, -male_population, color='lightblue', edgecolor='black', hatch='//', label='Male Population')
ax.barh(countries, female_population, color='lightcoral', edgecolor='black', hatch='xx', label='Female Population')
# Adding grid lines and enhancing plot aesthetics
ax.grid(True, which='both', axis='x', linestyle='--', color='gray', alpha=0.7)
# Adding labels and formatting
ax.set_xlabel('Population (in billions)', fontsize=12)
ax.set_title('Population Pyramid for Conflict-Affected Areas', fontsize=15, fontweight='bold')
ax.legend()
# Inverting x-axis for male population
ax.set_xlim([-max(male_population) * 1.1, max(female_population) * 1.1])
# Adding custom tick labels for the x-axis
ax.set_xticks(np.arange(-800000000, 900000000, 200000000))
ax.set_xticklabels(['800M', '600M', '400M', '200M', '0', '200M', '400M', '600M', '800M'])
# Add a box around the plot for better readability
for spine in ax.spines.values():
spine.set_edgecolor('black')
plt.tight_layout()
plt.show()
# Call the function to plot the population pyramid with a new design
plot_population_pyramid(df)
Cumulative Flow Diagram for Conflict Progression¶
In [60]:
import pandas as pd
import matplotlib.pyplot as plt
# Sample DataFrame
# Make sure to replace this with your actual DataFrame
data = {
'Conflict_Type': ['Type1', 'Type1', 'Type2', 'Type2', 'Type1'],
'Conflict_Region': ['1', '2', '1', '2', '3'] # Example values that should be numeric
}
df = pd.DataFrame(data)
# Convert 'Conflict_Region' to numeric, forcing errors to NaN
df['Conflict_Region'] = pd.to_numeric(df['Conflict_Region'], errors='coerce')
# Calculate cumulative sum of the Conflict_Region
conflict_progression = df.groupby('Conflict_Type')['Conflict_Region'].cumsum()
# Plotting the cumulative flow diagram
plt.figure(figsize=(10, 6))
plt.plot(df['Conflict_Type'], conflict_progression, label='Cumulative Conflict Progression')
plt.title("Cumulative Flow Diagram of Conflict Progression")
plt.xlabel("Conflict Type")
plt.ylabel("Cumulative Conflict Region")
plt.legend()
plt.show()
Coxcomb Chart for UN Resolutions by Country¶
In [15]:
import pandas as pd
import plotly.express as px
# Sample DataFrame
data = {
'Country': ['USA', 'Russia', 'India', 'China', 'Ukraine', 'Israel', 'Palestine', 'France', 'Germany', 'Pakistan', 'Taiwan'],
'Conflict_Region': ['Mid North America', 'Eastern Europe', 'South Asia', 'East Asia', 'Eastern Europe', 'Middle East', 'Middle East', 'Western Europe', 'Western Europe', 'South West Asia', 'East Asia'],
'Conflict_Type': ['Tension', 'War', 'Tension', 'Potential Conflict', 'War', 'War', 'War', 'Potential Conflict', 'Tension', 'Extreme Tension', 'Potential Conflict']
}
df = pd.DataFrame(data)
# Count the number of resolutions by Country and Conflict_Type
df_counts = df.groupby(['Country', 'Conflict_Type']).size().reset_index(name='Count')
# Create a Coxcomb (polar line) chart
fig = px.line_polar(df_counts, r='Count', theta='Country', line_close=True)
# Update the figure to fill the area under the line
fig.update_traces(fill='toself')
# Add title and adjust the size (customize the width and height as per your requirement)
fig.update_layout(
title="Coxcomb Chart of UN Resolutions by Country",
width=800, # Set width (e.g., increase or decrease this value as per your needs)
height=800 # Set height (e.g., increase or decrease this value as per your needs)
)
# Display the figure
fig.show()
Timeline Chart for Conflict Events¶
In [12]:
import pandas as pd
import plotly.express as px
# Example DataFrame
data = {
'Country': ['USA', 'Russia', 'India', 'China', 'Ukraine', 'Israel', 'Palestine', 'France', 'Germany', 'Pakistan', 'Taiwan'],
'Start_Date': ['2020-01-01', '2021-05-01', '2022-07-01', '2022-08-01', '2022-09-01', '2022-10-01', '2022-11-01', '2022-12-01', '2023-01-01', '2023-02-01', '2023-03-01'],
'End_Date': ['2020-12-31', '2021-12-31', '2022-12-31', '2022-12-31', '2022-12-31', '2022-12-31', '2022-12-31', '2022-12-31', '2023-12-31', '2023-12-31', '2023-12-31'],
'Conflict_Type': ['Tension', 'War', 'Tension', 'Potential Conflict', 'War', 'War', 'War', 'Potential Conflict', 'Tension', 'Extreme Tension', 'Potential Conflict'],
'Conflict_Region': ['Mid North America', 'Eastern Europe', 'South Asia', 'East Asia', 'Eastern Europe', 'Middle East', 'Middle East', 'Western Europe', 'Western Europe', 'South West Asia', 'East Asia']
}
df = pd.DataFrame(data)
# Convert Start_Date and End_Date to datetime
df['Start_Date'] = pd.to_datetime(df['Start_Date'])
df['End_Date'] = pd.to_datetime(df['End_Date'])
# Timeline chart to show key events in major conflicts
fig = px.timeline(df, x_start="Start_Date", x_end="End_Date", y="Country",
color="Conflict_Type", hover_name="Country")
# Adjust size with width and height as per your requirement
fig.update_layout(
title="Timeline of Major Conflict Events",
width=1000, # Adjust width here (e.g., increase or decrease this value)
height=600 # Adjust height here (e.g., increase or decrease this value)
)
# Display the figure
fig.show()
Chord Diagram for International Alliances¶
In [82]:
import numpy as np
import matplotlib.pyplot as plt
# Data for the chord diagram
labels = ["USA", "Russia", "China", "EU", "NATO"]
n = len(labels)
# Create the connections (source, target, values)
source = [0, 1, 2] # Indices of source nodes
target = [2, 3, 4] # Indices of target nodes
values = [10, 20, 15] # Values corresponding to the links
# Prepare the circle layout for the nodes
theta = np.linspace(0, 2 * np.pi, n, endpoint=False)
# Create figure and axis
fig, ax = plt.subplots(figsize=(8, 8), subplot_kw={'projection': 'polar'})
# Plot the nodes
for i in range(n):
ax.text(theta[i], 0.5, labels[i], horizontalalignment='center', verticalalignment='center', fontsize=12)
# Draw the connections
for s, t, v in zip(source, target, values):
# Draw lines (chords) between nodes
ax.plot([theta[s], theta[t]], [0.5, 0.5], linewidth=v, color='b', alpha=0.6)
# Set limits and title
ax.set_ylim(0, 1)
ax.set_title("Chord Diagram of International Alliances", fontsize=16, pad=20)
plt.show()
Matrix Plot for Diplomatic Relations¶
In [83]:
# Matrix plot for showing diplomatic relations between countries
matrix_data = np.random.rand(10, 10)
plt.matshow(matrix_data, cmap='viridis')
plt.colorbar()
plt.title("Matrix Plot for Diplomatic Relations")
plt.show()
Density Plot for Conflict Casualties¶
In [86]:
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# Sample DataFrame for demonstration
data = {
'Country': ['USA', 'Russia', 'India', 'China', 'Ukraine'],
'Casualties': [100, 200, 150, 300, 250]
}
df = pd.DataFrame(data)
# Check the columns of the DataFrame
print("Columns in DataFrame:", df.columns)
# If 'Total_Population' doesn't exist, use 'Casualties' or create one
if 'Total_Population' in df.columns:
population_data = df['Total_Population']
else:
# Use existing 'Casualties' data for density plot
population_data = df['Casualties']
# Density plot for visualizing distribution of casualties
plt.figure(figsize=(10, 6))
sns.kdeplot(population_data, shade=True)
plt.title("Density Plot of Conflict Casualties")
plt.xlabel("Casualties")
plt.ylabel("Density")
plt.show()
Columns in DataFrame: Index(['Country', 'Casualties'], dtype='object')
C:\Users\pradu\AppData\Local\Temp\ipykernel_6516\1055017277.py:25: FutureWarning: `shade` is now deprecated in favor of `fill`; setting `fill=True`. This will become an error in seaborn v0.14.0; please update your code.
Slope Chart for Conflict Severity over Time¶
In [89]:
import pandas as pd
import plotly.express as px
# Sample DataFrame for demonstration
data = {
'date': ['2020-01', '2020-02', '2020-03', '2020-01', '2020-02', '2020-03'],
'conflict_severity': [2, 3, 4, 1, 2, 3],
'conflict_name': ['Conflict A', 'Conflict A', 'Conflict A', 'Conflict B', 'Conflict B', 'Conflict B']
}
df = pd.DataFrame(data)
# Check the columns of the DataFrame
print("Columns in DataFrame:", df.columns)
# Ensure the required columns exist
required_columns = ['date', 'conflict_severity', 'conflict_name']
for col in required_columns:
if col not in df.columns:
print(f"Column '{col}' is missing from the DataFrame.")
# Handle the missing column as needed, e.g., create it or raise an error
# Slope chart to show changes in conflict severity over time
fig = px.line(df, x="date", y="conflict_severity", color="conflict_name", line_shape="spline")
fig.update_layout(title="Slope Chart of Conflict Severity over Time")
fig.show()
Columns in DataFrame: Index(['date', 'conflict_severity', 'conflict_name'], dtype='object')
Treemap for Geopolitical Influence¶
In [17]:
import pandas as pd
import plotly.express as px
import numpy as np
# Full dataset
data = {
'Country': ['USA', 'Russia', 'India', 'China', 'Ukraine', 'Israel', 'Palestine', 'France', 'Germany', 'Pakistan', 'Taiwan'],
'Conflict_Region': ['Mid North America', 'Eastern Europe', 'South Asia', 'East Asia', 'Eastern Europe', 'Middle East', 'Middle East', 'Western Europe', 'Western Europe', 'South West Asia', 'East Asia'],
'Conflict_Type': ['Tension', 'War', 'Tension', 'Potential Conflict', 'War', 'War', 'War', 'Potential Conflict', 'Tension', 'Extreme Tension', 'Potential Conflict'],
'Latitude': [37.0902, 61.5240, 20.5937, 35.8617, 48.3794, 31.0461, 31.9522, 46.6034, 51.1657, 30.3753, 23.6978],
'Longitude': [-95.7129, 105.3188, 78.9629, 104.1954, 31.1656, 34.8516, 35.2332, 1.8883, 10.4515, 69.3451, 121.0200],
'Altitude': [760, 600, 160, 1840, 175, 508, 795, 375, 263, 900, 1150],
'Conflict_Intensity': [10, 20, 30, 25, 15, 18, 5, 12, 22, 16, 14],
'Deaths': np.random.randint(1000, 50000, size=11),
'Economic_Impact_Billion': np.random.uniform(1.5, 100, size=11),
'Environmental_Damage_Index': np.random.uniform(1, 10, size=11),
'UN_Interventions': np.random.choice([1, 2, 3, 4], size=11),
'Total_Population': [331002651, 145912025, 1380004385, 1439323776, 43733762, 8655535, 5000000, 65273511, 83783942, 225199937, 23816775],
'Male_Population': [162000000, 67000000, 705000000, 724000000, 22000000, 4300000, 2500000, 32000000, 41000000, 113000000, 12000000],
'Female_Population': [169000000, 78900000, 675000000, 715000000, 21700000, 4350000, 2500000, 33200000, 42700000, 112000000, 11800000]
}
# Create DataFrame
df = pd.DataFrame(data)
# Treemap showing geopolitical influence by country and region
fig = px.treemap(df, path=['Country', 'Conflict_Region'], values='Conflict_Intensity',
color='Conflict_Intensity', hover_name='Country')
# Adjust size with width and height as per your requirement
fig.update_layout(
title="Treemap of Geopolitical Influence by Region and Country",
width=1200, # Adjust width here (e.g., increase or decrease this value)
height=800 # Adjust height here (e.g., increase or decrease this value)
)
# Display the figure
fig.show()
NLP-Based Sentiment Analysis¶
In [4]:
import pandas as pd
import matplotlib.pyplot as plt
from nltk.sentiment.vader import SentimentIntensityAnalyzer
# Sample data: list of texts (UN speeches/reports)
reports = [
"The UN stands firm in its commitment to peace.",
"Conflict continues to plague the region, causing suffering.",
"We celebrate the progress made towards sustainable development.",
"There is an urgent need for humanitarian assistance.",
"International cooperation is essential for success."
]
# Initialize Sentiment Analyzer
sid = SentimentIntensityAnalyzer()
# Perform sentiment analysis
sentiments = [sid.polarity_scores(report) for report in reports]
# Convert to DataFrame for visualization
sentiment_df = pd.DataFrame(sentiments)
# Visualize sentiment analysis
sentiment_df[['neg', 'neu', 'pos']].plot(kind='bar', stacked=True, figsize=(10, 6))
plt.title("Sentiment Analysis of UN Reports")
plt.xlabel("Reports")
plt.ylabel("Sentiment Score")
plt.xticks(ticks=range(len(reports)), labels=[f'Report {i+1}' for i in range(len(reports))], rotation=0)
plt.show()
Pareto Chart¶
In [18]:
import matplotlib.pyplot as plt
from matplotlib.ticker import PercentFormatter
import numpy as np
# Actual conflict-related factors
factors = ['Geopolitical Influence', 'Conflict Intensity', 'Economic Impact', 'Environmental Damage']
# Sample contributions data (can be adjusted based on the dataset)
contributions = [60, 25, 10, 5]
# Pareto plot
def plot_pareto_chart(width=8, height=5):
fig, ax = plt.subplots(figsize=(width, height)) # Set figure size dynamically
ax.bar(factors, contributions, color="C0", label='Contributions')
# Calculate cumulative percentages for Pareto line
cumulative_percent = np.cumsum(contributions) / sum(contributions) * 100
ax2 = ax.twinx() # Create a twin Axes sharing the x-axis
ax2.plot(factors, cumulative_percent, color="C1", marker="D", ms=7, label='Cumulative Percentage')
# Format y-axis of ax2 as percentage
ax2.yaxis.set_major_formatter(PercentFormatter())
# Titles and labels
plt.title("Pareto Chart: Global Conflict Factors")
ax.set_xlabel("Factors")
ax.set_ylabel("Contributions")
# Legends
ax.legend(loc='upper left')
ax2.legend(loc='upper right')
# Show plot
plt.show()
# Example of calling the function with dynamic size
plot_pareto_chart(width=10, height=6) # Adjust the width and height as per your requirement
Error Bar¶
In [7]:
import matplotlib.pyplot as plt
import numpy as np
# Sample data
data = {
'x': np.array([1, 2, 3, 4, 5]), # x-values
'y': np.array([2.5, 3.0, 4.0, 5.2, 6.5]), # y-values
'error': np.array([0.5, 0.2, 0.3, 0.4, 0.6]) # error values for y
}
# Create an error bar plot
plt.errorbar(x=data['x'], y=data['y'], yerr=data['error'], fmt='o', color='b', capsize=5)
plt.title("Error Bar Plot")
plt.xlabel("X-axis Label")
plt.ylabel("Y-axis Label")
plt.grid(True) # Add a grid for better readability
plt.show()
Density Plot¶
In [9]:
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
# Sample data
np.random.seed(42) # For reproducibility
data = {
'feature': np.random.normal(loc=0, scale=1, size=1000) # Generate 1000 random values from a normal distribution
}
# Create a density plot
sns.kdeplot(data['feature'], shade=True, color='blue')
plt.title("Density Plot")
plt.xlabel("Feature Value")
plt.ylabel("Density")
plt.grid(True) # Add a grid for better readability
plt.show()
C:\Users\pradu\AppData\Local\Temp\ipykernel_11908\1677624797.py:12: FutureWarning: `shade` is now deprecated in favor of `fill`; setting `fill=True`. This will become an error in seaborn v0.14.0; please update your code. sns.kdeplot(data['feature'], shade=True, color='blue')
Funnel Chart¶
In [1]:
import plotly.graph_objects as go
# Actual stages in the conflict resolution process
stages = ["Conflict Identification", "UN Diplomacy Intervention", "Peace Negotiations", "Post-Conflict Recovery"]
# Corresponding values for each stage (example data)
stage_values = [500, 450, 300, 200]
# Create a funnel chart
fig = go.Figure(go.Funnel(
y=stages,
x=stage_values
))
# Show the figure
fig.update_layout(title="Funnel Chart: Conflict Resolution Stages")
fig.show()
Spiral Chart¶
In [2]:
import numpy as np
import matplotlib.pyplot as plt # Importing matplotlib.pyplot as plt
theta = np.linspace(0, 4*np.pi, 100)
r = np.linspace(0, 1, 100)
fig, ax = plt.subplots(subplot_kw={'projection': 'polar'})
ax.plot(theta, r)
plt.title("Spiral Chart")
plt.show()
NLP-based Sentiment Analysis of Social Media during Conflicts¶
In [2]:
pip install spacy pydantic vaderSentiment
Requirement already satisfied: spacy in d:\anaconda app\lib\site-packages (3.7.6) Requirement already satisfied: pydantic in d:\anaconda app\lib\site-packages (2.9.2) Collecting vaderSentiment Downloading vaderSentiment-3.3.2-py2.py3-none-any.whl.metadata (572 bytes) Requirement already satisfied: spacy-legacy<3.1.0,>=3.0.11 in d:\anaconda app\lib\site-packages (from spacy) (3.0.12) Requirement already satisfied: spacy-loggers<2.0.0,>=1.0.0 in d:\anaconda app\lib\site-packages (from spacy) (1.0.5) Requirement already satisfied: murmurhash<1.1.0,>=0.28.0 in d:\anaconda app\lib\site-packages (from spacy) (1.0.10) Requirement already satisfied: cymem<2.1.0,>=2.0.2 in d:\anaconda app\lib\site-packages (from spacy) (2.0.8) Requirement already satisfied: preshed<3.1.0,>=3.0.2 in d:\anaconda app\lib\site-packages (from spacy) (3.0.9) Requirement already satisfied: thinc<8.3.0,>=8.2.2 in d:\anaconda app\lib\site-packages (from spacy) (8.2.5) Requirement already satisfied: wasabi<1.2.0,>=0.9.1 in d:\anaconda app\lib\site-packages (from spacy) (1.1.3) Requirement already satisfied: srsly<3.0.0,>=2.4.3 in d:\anaconda app\lib\site-packages (from spacy) (2.4.8) Requirement already satisfied: catalogue<2.1.0,>=2.0.6 in d:\anaconda app\lib\site-packages (from spacy) (2.0.10) Requirement already satisfied: weasel<0.5.0,>=0.1.0 in d:\anaconda app\lib\site-packages (from spacy) (0.4.1) Requirement already satisfied: typer<1.0.0,>=0.3.0 in d:\anaconda app\lib\site-packages (from spacy) (0.12.5) Requirement already satisfied: tqdm<5.0.0,>=4.38.0 in d:\anaconda app\lib\site-packages (from spacy) (4.66.4) Requirement already satisfied: requests<3.0.0,>=2.13.0 in d:\anaconda app\lib\site-packages (from spacy) (2.32.2) Requirement already satisfied: jinja2 in d:\anaconda app\lib\site-packages (from spacy) (3.1.4) Requirement already satisfied: setuptools in d:\anaconda app\lib\site-packages (from spacy) (69.5.1) Requirement already satisfied: packaging>=20.0 in d:\anaconda app\lib\site-packages (from spacy) (23.2) Requirement already satisfied: langcodes<4.0.0,>=3.2.0 in d:\anaconda app\lib\site-packages (from spacy) (3.4.1) Requirement already satisfied: numpy>=1.19.0 in d:\anaconda app\lib\site-packages (from spacy) (1.26.4) Requirement already satisfied: annotated-types>=0.6.0 in d:\anaconda app\lib\site-packages (from pydantic) (0.6.0) Requirement already satisfied: pydantic-core==2.23.4 in d:\anaconda app\lib\site-packages (from pydantic) (2.23.4) Requirement already satisfied: typing-extensions>=4.6.1 in d:\anaconda app\lib\site-packages (from pydantic) (4.11.0) Requirement already satisfied: language-data>=1.2 in d:\anaconda app\lib\site-packages (from langcodes<4.0.0,>=3.2.0->spacy) (1.2.0) Requirement already satisfied: charset-normalizer<4,>=2 in d:\anaconda app\lib\site-packages (from requests<3.0.0,>=2.13.0->spacy) (2.0.4) Requirement already satisfied: idna<4,>=2.5 in d:\anaconda app\lib\site-packages (from requests<3.0.0,>=2.13.0->spacy) (3.7) Requirement already satisfied: urllib3<3,>=1.21.1 in d:\anaconda app\lib\site-packages (from requests<3.0.0,>=2.13.0->spacy) (2.2.2) Requirement already satisfied: certifi>=2017.4.17 in d:\anaconda app\lib\site-packages (from requests<3.0.0,>=2.13.0->spacy) (2024.8.30) Requirement already satisfied: blis<0.8.0,>=0.7.8 in d:\anaconda app\lib\site-packages (from thinc<8.3.0,>=8.2.2->spacy) (0.7.11) Requirement already satisfied: confection<1.0.0,>=0.0.1 in d:\anaconda app\lib\site-packages (from thinc<8.3.0,>=8.2.2->spacy) (0.1.5) Requirement already satisfied: colorama in d:\anaconda app\lib\site-packages (from tqdm<5.0.0,>=4.38.0->spacy) (0.4.6) Requirement already satisfied: click>=8.0.0 in d:\anaconda app\lib\site-packages (from typer<1.0.0,>=0.3.0->spacy) (8.1.7) Requirement already satisfied: shellingham>=1.3.0 in d:\anaconda app\lib\site-packages (from typer<1.0.0,>=0.3.0->spacy) (1.5.4) Requirement already satisfied: rich>=10.11.0 in d:\anaconda app\lib\site-packages (from typer<1.0.0,>=0.3.0->spacy) (13.3.5) Requirement already satisfied: cloudpathlib<1.0.0,>=0.7.0 in d:\anaconda app\lib\site-packages (from weasel<0.5.0,>=0.1.0->spacy) (0.19.0) Requirement already satisfied: smart-open<8.0.0,>=5.2.1 in d:\anaconda app\lib\site-packages (from weasel<0.5.0,>=0.1.0->spacy) (5.2.1) Requirement already satisfied: MarkupSafe>=2.0 in d:\anaconda app\lib\site-packages (from jinja2->spacy) (2.1.3) Requirement already satisfied: marisa-trie>=0.7.7 in d:\anaconda app\lib\site-packages (from language-data>=1.2->langcodes<4.0.0,>=3.2.0->spacy) (1.2.0) Requirement already satisfied: markdown-it-py<3.0.0,>=2.2.0 in d:\anaconda app\lib\site-packages (from rich>=10.11.0->typer<1.0.0,>=0.3.0->spacy) (2.2.0) Requirement already satisfied: pygments<3.0.0,>=2.13.0 in d:\anaconda app\lib\site-packages (from rich>=10.11.0->typer<1.0.0,>=0.3.0->spacy) (2.15.1) Requirement already satisfied: mdurl~=0.1 in d:\anaconda app\lib\site-packages (from markdown-it-py<3.0.0,>=2.2.0->rich>=10.11.0->typer<1.0.0,>=0.3.0->spacy) (0.1.0) Downloading vaderSentiment-3.3.2-py2.py3-none-any.whl (125 kB) ---------------------------------------- 0.0/126.0 kB ? eta -:--:-- --- ------------------------------------ 10.2/126.0 kB ? eta -:--:-- ------ -------------------------------- 20.5/126.0 kB 330.3 kB/s eta 0:00:01 ------------ -------------------------- 41.0/126.0 kB 281.8 kB/s eta 0:00:01 --------------- ----------------------- 51.2/126.0 kB 327.7 kB/s eta 0:00:01 ------------------------- ------------- 81.9/126.0 kB 353.1 kB/s eta 0:00:01 ------------------------------ ------- 102.4/126.0 kB 368.6 kB/s eta 0:00:01 -------------------------------------- 126.0/126.0 kB 411.8 kB/s eta 0:00:00 Installing collected packages: vaderSentiment Successfully installed vaderSentiment-3.3.2 Note: you may need to restart the kernel to use updated packages.
In [4]:
# Import necessary libraries
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
import pandas as pd
import matplotlib.pyplot as plt
# Initialize the sentiment analyzer
analyzer = SentimentIntensityAnalyzer()
# Sample DataFrame with social media posts (Replace with actual dataset)
data = {'post': ["This war is tragic", "Peace talks are hopeful", "Conflict is escalating"]}
df = pd.DataFrame(data)
# Function for sentiment analysis
def analyze_sentiment(text):
sentiment = analyzer.polarity_scores(text)
return sentiment['compound']
# Apply the sentiment analysis function to each post
df['sentiment'] = df['post'].apply(analyze_sentiment)
# Plot sentiment (assuming you have date information in the real dataset)
plt.plot(df['sentiment'])
plt.title('Sentiment Analysis of Social Media Posts')
plt.xlabel('Post Number')
plt.ylabel('Sentiment Score')
plt.show()
Correlation Analysis of Geographical Features and Conflict Occurrence¶
In [4]:
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
# Data initialization
data = {
'Country': ['USA', 'Russia', 'India', 'China', 'Ukraine', 'Israel', 'Palestine', 'France', 'Germany', 'Pakistan', 'Taiwan'],
'Conflict_Region': ['Mid North America', 'Eastern Europe', 'South Asia', 'East Asia', 'Eastern Europe', 'Middle East', 'Middle East', 'Western Europe', 'Western Europe', 'South West Asia', 'East Asia'],
'Conflict_Type': ['Tension', 'War', 'Tension', 'Potential Conflict', 'War', 'War', 'War', 'Potential Conflict', 'Tension', 'Extreme Tension', 'Potential Conflict'],
'Latitude': [37.0902, 61.5240, 20.5937, 35.8617, 48.3794, 31.0461, 31.9522, 46.6034, 51.1657, 30.3753, 23.6978],
'Longitude': [-95.7129, 105.3188, 78.9629, 104.1954, 31.1656, 34.8516, 35.2332, 1.8883, 10.4515, 69.3451, 121.0200],
'Altitude': [760, 600, 160, 1840, 175, 508, 795, 375, 263, 900, 1150],
'Conflict_Intensity': [10, 20, 30, 25, 15, 18, 5, 12, 22, 16, 14],
'Deaths': np.random.randint(1000, 50000, size=11),
'Economic_Impact_Billion': np.random.uniform(1.5, 100, size=11),
'Environmental_Damage_Index': np.random.uniform(1, 10, size=11),
'UN_Interventions': np.random.choice([1, 2, 3, 4], size=11),
'Total_Population': [331002651, 145912025, 1380004385, 1439323776, 43733762, 8655535, 5000000, 65273511, 83783942, 225199937, 23816775],
'Male_Population': [162000000, 67000000, 705000000, 724000000, 22000000, 4300000, 2500000, 32000000, 41000000, 113000000, 12000000],
'Female_Population': [169000000, 78900000, 675000000, 715000000, 21700000, 4350000, 2500000, 33200000, 42700000, 112000000, 11800000]
}
# Creating DataFrame
df = pd.DataFrame(data)
# Calculate the correlation matrix for geographical features and conflict-related variables
correlation_matrix = df[['Latitude', 'Longitude', 'Altitude', 'Conflict_Intensity', 'Deaths', 'Economic_Impact_Billion', 'Environmental_Damage_Index']].corr()
# Display the correlation matrix
print("\nCorrelation Matrix:\n", correlation_matrix)
# Visualization: Correlation Heatmap
plt.figure(figsize=(10, 6))
sns.heatmap(correlation_matrix, annot=True, cmap='coolwarm', linewidths=0.5)
plt.title("Correlation Heatmap of Geographical Features and Conflict Occurrence")
plt.show()
# Pairplot for advanced data visualization
sns.pairplot(df[['Latitude', 'Longitude', 'Altitude', 'Conflict_Intensity', 'Deaths', 'Economic_Impact_Billion', 'Environmental_Damage_Index']])
plt.suptitle("Pairplot for Geographical Features and Conflict Analysis", y=1.02)
plt.show()
Correlation Matrix:
Latitude Longitude Altitude Conflict_Intensity \
Latitude 1.000000 -0.166654 -0.268078 -0.054548
Longitude -0.166654 1.000000 0.363828 0.460794
Altitude -0.268078 0.363828 1.000000 -0.028840
Conflict_Intensity -0.054548 0.460794 -0.028840 1.000000
Deaths -0.243490 0.396527 -0.314415 0.016596
Economic_Impact_Billion 0.110978 0.214765 -0.107544 0.083252
Environmental_Damage_Index 0.539303 0.367546 0.155865 0.377256
Deaths Economic_Impact_Billion \
Latitude -0.243490 0.110978
Longitude 0.396527 0.214765
Altitude -0.314415 -0.107544
Conflict_Intensity 0.016596 0.083252
Deaths 1.000000 -0.024432
Economic_Impact_Billion -0.024432 1.000000
Environmental_Damage_Index -0.019071 -0.052279
Environmental_Damage_Index
Latitude 0.539303
Longitude 0.367546
Altitude 0.155865
Conflict_Intensity 0.377256
Deaths -0.019071
Economic_Impact_Billion -0.052279
Environmental_Damage_Index 1.000000
In [ ]:
In [ ]:
Interactive Dashboards for Conflict Analysis¶
In [11]:
import dash
from dash import dcc, html
from dash.dependencies import Input, Output
import plotly.express as px
import pandas as pd
import numpy as np
# Step 1: Prepare the data
data = {
'Country': ['USA', 'Russia', 'India', 'China', 'Ukraine', 'Israel', 'Palestine', 'France', 'Germany', 'Pakistan', 'Taiwan'],
'Conflict_Region': ['Mid North America', 'Eastern Europe', 'South Asia', 'East Asia', 'Eastern Europe', 'Middle East', 'Middle East', 'Western Europe', 'Western Europe', 'South West Asia', 'East Asia'],
'Conflict_Type': ['Tension', 'War', 'Tension', 'Potential Conflict', 'War', 'War', 'War', 'Potential Conflict', 'Tension', 'Extreme Tension', 'Potential Conflict'],
'Latitude': [37.0902, 61.5240, 20.5937, 35.8617, 48.3794, 31.0461, 31.9522, 46.6034, 51.1657, 30.3753, 23.6978],
'Longitude': [-95.7129, 105.3188, 78.9629, 104.1954, 31.1656, 34.8516, 35.2332, 1.8883, 10.4515, 69.3451, 121.0200],
'Altitude': [760, 600, 160, 1840, 175, 508, 795, 375, 263, 900, 1150],
'Conflict_Intensity': [10, 20, 30, 25, 15, 18, 5, 12, 22, 16, 14],
'Deaths': np.random.randint(1000, 50000, size=11),
'Economic_Impact_Billion': np.random.uniform(1.5, 100, size=11),
'Environmental_Damage_Index': np.random.uniform(1, 10, size=11),
'UN_Interventions': np.random.choice([1, 2, 3, 4], size=11),
'Total_Population': [331002651, 145912025, 1380004385, 1439323776, 43733762, 8655535, 5000000, 65273511, 83783942, 225199937, 23816775],
'Male_Population': [162000000, 67000000, 705000000, 724000000, 22000000, 4300000, 2500000, 32000000, 41000000, 113000000, 12000000],
'Female_Population': [169000000, 78900000, 675000000, 715000000, 21700000, 4350000, 2500000, 33200000, 42700000, 112000000, 11800000]
}
df = pd.DataFrame(data)
# Step 2: Create Choropleth map using Plotly
fig = px.choropleth(
df,
locations="Country",
locationmode="country names",
color="Conflict_Intensity",
hover_name="Country",
hover_data=["Conflict_Region", "Conflict_Type", "Deaths", "Economic_Impact_Billion", "Environmental_Damage_Index", "UN_Interventions"],
color_continuous_scale=px.colors.sequential.Plasma,
title="Global Conflict Intensity and Analysis"
)
# Step 3: Set up the Dash application
app = dash.Dash(__name__)
app.layout = html.Div([
html.H1("Interactive Global Conflict Mapping Dashboard", style={'text-align': 'center'}),
# Choropleth Map
dcc.Graph(
id='conflict_map',
figure=fig
),
# Dropdown for user to filter by conflict type
dcc.Dropdown(
id='conflict_type_dropdown',
options=[{'label': conflict, 'value': conflict} for conflict in df['Conflict_Type'].unique()],
value='War',
placeholder="Select a Conflict Type",
style={"width": "50%", 'display': 'inline-block'}
),
# Output description text for the selected conflict
html.Div(id='output_container', children=[])
])
# Step 4: Callback to update the choropleth map based on the selected conflict type
@app.callback(
[Output(component_id='conflict_map', component_property='figure'),
Output(component_id='output_container', component_property='children')],
[Input(component_id='conflict_type_dropdown', component_property='value')]
)
def update_graph(selected_conflict):
filtered_df = df[df['Conflict_Type'] == selected_conflict]
# Update the map with the filtered data
fig = px.choropleth(
filtered_df,
locations="Country",
locationmode="country names",
color="Conflict_Intensity",
hover_name="Country",
hover_data=["Conflict_Region", "Conflict_Type", "Deaths", "Economic_Impact_Billion", "Environmental_Damage_Index", "UN_Interventions"],
color_continuous_scale=px.colors.sequential.Plasma,
title=f"Global Conflict Intensity: {selected_conflict}"
)
return fig, f"Showing conflict analysis for: {selected_conflict}"
# Step 5: Run the Dash app
if __name__ == '__main__':
app.run_server(debug=True)
Conflict Prediction using LSTM Models¶
In [13]:
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense
# Sample DataFrame (Replace with actual data)
data = {'conflict_intensity': [5, 8, 10, 7, 6, 9, 8]}
df = pd.DataFrame(data)
# Prepare the data
scaler = MinMaxScaler()
scaled_data = scaler.fit_transform(df['conflict_intensity'].values.reshape(-1, 1))
# Create sequences for LSTM
def create_sequences(data, seq_length):
sequences = []
for i in range(len(data) - seq_length):
sequences.append(data[i:i + seq_length])
return np.array(sequences)
seq_length = 3
X = create_sequences(scaled_data, seq_length)
y = scaled_data[seq_length:]
# Build LSTM model
model = Sequential()
model.add(LSTM(50, return_sequences=True, input_shape=(X.shape[1], 1)))
model.add(LSTM(50, return_sequences=False))
model.add(Dense(25))
model.add(Dense(1))
model.compile(optimizer='adam', loss='mean_squared_error')
model.fit(X, y, epochs=10, batch_size=1)
# Predictions (replace with actual test data)
predicted_conflicts = model.predict(X)
predicted_conflicts = scaler.inverse_transform(predicted_conflicts)
D:\Anaconda app\Lib\site-packages\keras\src\layers\rnn\rnn.py:204: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
Epoch 1/10 4/4 ━━━━━━━━━━━━━━━━━━━━ 7s 10ms/step - loss: 0.2054 Epoch 2/10 4/4 ━━━━━━━━━━━━━━━━━━━━ 0s 5ms/step - loss: 0.2440 Epoch 3/10 4/4 ━━━━━━━━━━━━━━━━━━━━ 0s 5ms/step - loss: 0.1598 Epoch 4/10 4/4 ━━━━━━━━━━━━━━━━━━━━ 0s 5ms/step - loss: 0.1136 Epoch 5/10 4/4 ━━━━━━━━━━━━━━━━━━━━ 0s 0s/step - loss: 0.0571 Epoch 6/10 4/4 ━━━━━━━━━━━━━━━━━━━━ 0s 8ms/step - loss: 0.0333 Epoch 7/10 4/4 ━━━━━━━━━━━━━━━━━━━━ 0s 5ms/step - loss: 0.0750 Epoch 8/10 4/4 ━━━━━━━━━━━━━━━━━━━━ 0s 5ms/step - loss: 0.0532 Epoch 9/10 4/4 ━━━━━━━━━━━━━━━━━━━━ 0s 5ms/step - loss: 0.0606 Epoch 10/10 4/4 ━━━━━━━━━━━━━━━━━━━━ 0s 5ms/step - loss: 0.0509 1/1 ━━━━━━━━━━━━━━━━━━━━ 1s 782ms/step
3D Visualization of Military Alliances and Coalitions¶
In [10]:
import pandas as pd
import plotly.graph_objects as go
# Sample DataFrame (Replace with actual data)
data = {
'country': ['USA', 'Russia', 'India', 'Brazil', 'China', 'Australia'],
'alliance': ['NATO', 'BRICS', 'BRICS', 'BRICS', 'BRICS', 'AUKUS']
}
df = pd.DataFrame(data)
# Create a 3D choropleth map
fig = go.Figure(go.Choroplethmapbox(
geojson='https://raw.githubusercontent.com/datasets/geo-boundaries/master/data/countries.geojson',
locations=df['country'],
z=df['alliance'].factorize()[0],
colorscale='Viridis',
text=df['country'], # Show country names on the map
hoverinfo='text', # Display country names on hover
))
# Update layout for mapbox
fig.update_layout(
mapbox_style="open-street-map", # Change to Open Street Map for a street map feel
mapbox_zoom=1.5, # Adjust zoom level
mapbox_center={"lat": 20, "lon": 0}, # Center of the map
margin={"r":0,"t":0,"l":0,"b":0}, # Remove margins
)
# Change ocean color to blue
fig.update_traces(marker=dict(line=dict(width=0)), # No borders between countries
selector=dict(type='choroplethmapbox'))
# Add a color scale for ocean
fig.add_trace(go.Choroplethmapbox(
geojson='https://raw.githubusercontent.com/datasets/geo-boundaries/master/data/countries.geojson',
locations=df['country'],
z=[1] * len(df), # Dummy variable to set the color of countries
colorscale=[[0, 'blue'], [1, 'blue']], # Set ocean color to blue
showscale=False # Hide scale
))
# Adjust the layout to fit the whole output screen
fig.update_layout(
height=700, # Adjust height as needed
width=1500, # Adjust width as needed
)
# Show the figure
fig.show()
In [ ]:
Comparison of UN Resolutions Impact¶
In [19]:
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
# Load UN resolution impact data
df_resolutions = pd.read_csv(r'E:\un_resolutions.csv')
# Add 'unsc_resolution' column (Example: 'Yes' or 'No')
df_resolutions['unsc_resolution'] = ['Yes', 'Yes', 'No', 'Yes', 'Yes', 'No', 'Yes', 'No', 'Yes', 'Yes',
'No', 'Yes', 'No', 'Yes', 'Yes', 'No', 'Yes', 'No', 'Yes', 'No']
# Calculate reduction in conflict intensity after resolutions
df_resolutions['conflict_intensity_reduction'] = df_resolutions['before_resolution_intensity'] - df_resolutions['after_resolution_intensity']
# Visualize the comparison
plt.figure(figsize=(10, 6))
sns.barplot(data=df_resolutions, x='conflict', y='conflict_intensity_reduction', hue='unsc_resolution')
plt.title('Impact of UN Resolutions on Conflict Intensity Reduction')
plt.xlabel('Conflict')
plt.ylabel('Reduction in Intensity')
plt.xticks(rotation=45)
plt.show()
Conflict Timeline Visualization¶
In [9]:
import os
import pandas as pd
import plotly.express as px
# Define the file path
csv_file_path = 'conflict_timeline.csv'
# Step 1: Create the CSV file with sample data if it doesn't exist
if not os.path.exists(csv_file_path):
# Sample data for the conflict timeline
data = {
'country': ['USA', 'Russia', 'India', 'China', 'Ukraine', 'Israel', 'Palestine', 'France', 'Germany', 'Pakistan', 'Taiwan'],
'conflict': ['Tension', 'War', 'Tension', 'Potential Conflict', 'War', 'War', 'War', 'Potential Conflict', 'Tension', 'Extreme Tension', 'Potential Conflict'],
'intensity': [10, 20, 30, 25, 15, 18, 5, 12, 22, 16, 14],
'year': [2000, 2005, 2010, 2015, 2022, 2023, 2021, 2015, 2022, 2019, 2021],
}
# Create a DataFrame
df_sample = pd.DataFrame(data)
# Save to CSV
df_sample.to_csv(csv_file_path, index=False)
print(f"Created {csv_file_path} with sample data.")
else:
print(f"{csv_file_path} already exists.")
# Step 2: Load the conflict timeline data
df_timeline = pd.read_csv(csv_file_path)
# Step 3: Create an animated scatter plot to show conflict progression
fig = px.scatter_geo(df_timeline, locations="country", locationmode="country names",
color="conflict", hover_name="conflict", size="intensity",
animation_frame="year", projection="natural earth")
# Customize map colors and settings
fig.update_geos(
showcoastlines=True, coastlinecolor="black",
showland=True, landcolor="lightgreen", # Light green for continents
showocean=True, oceancolor="lightblue", # Light blue for oceans
showlakes=True, lakecolor="blue",
showrivers=True, rivercolor="blue",
projection_scale=1, # Controls zoom level
showcountries=True, countrycolor="black",
visible=True
)
# Ensure country names are visible upon zoom
fig.update_layout(
title="Timeline of Global Conflicts",
width=1200, # Set your desired width (in pixels)
height=800, # Set your desired height (in pixels)
geo=dict(
showframe=False,
showcoastlines=True,
projection_type='natural earth',
showocean=True,
oceancolor="lightblue",
showland=True,
landcolor="lightgreen",
countrycolor="black",
showcountries=True
)
)
fig.show()
conflict_timeline.csv already exists.
Natural Resource Competition as a Conflict Factor¶
In [29]:
import os
import pandas as pd
import statsmodels.api as sm
# Define the file path
csv_file_path = 'resource_conflict_data.csv'
# Step 1: Create the CSV file with sample data if it doesn't exist
if not os.path.exists(csv_file_path):
# Sample data for resource conflict analysis
data = {
'water_access': [90, 85, 80, 70, 60, 50], # Percentage of population with access to clean water
'oil_production': [1000, 1500, 2000, 3000, 4000, 5000], # Barrels of oil produced per day
'conflict_intensity': [2, 3, 4, 5, 6, 7] # Conflict intensity score (1-10)
}
# Create a DataFrame
df_sample = pd.DataFrame(data)
# Save to CSV
df_sample.to_csv(csv_file_path, index=False)
print(f"Created {csv_file_path} with sample data.")
else:
print(f"{csv_file_path} already exists.")
# Step 2: Load the dataset
df_resource_conflict = pd.read_csv(csv_file_path)
# Step 3: Prepare independent and dependent variables for regression
X = df_resource_conflict[['water_access', 'oil_production']] # Independent variables
y = df_resource_conflict['conflict_intensity'] # Dependent variable
# Step 4: Add a constant to the model
X = sm.add_constant(X)
# Step 5: Fit the regression model
model = sm.OLS(y, X).fit()
# Step 6: Print the summary of regression results
print(model.summary())
Created resource_conflict_data.csv with sample data.
OLS Regression Results
==============================================================================
Dep. Variable: conflict_intensity R-squared: 0.977
Model: OLS Adj. R-squared: 0.971
Method: Least Squares F-statistic: 171.0
Date: Mon, 30 Sep 2024 Prob (F-statistic): 0.000197
Time: 15:24:30 Log-Likelihood: -0.38948
No. Observations: 6 AIC: 4.779
Df Residuals: 4 BIC: 4.362
Df Model: 1
Covariance Type: nonrobust
==================================================================================
coef std err t P>|t| [0.025 0.975]
----------------------------------------------------------------------------------
const 0.0001 2.83e-05 4.239 0.013 4.14e-05 0.000
water_access 0.0120 0.003 4.233 0.013 0.004 0.020
oil_production 0.0013 6.78e-05 19.476 0.000 0.001 0.002
==============================================================================
Omnibus: nan Durbin-Watson: 1.100
Prob(Omnibus): nan Jarque-Bera (JB): 0.230
Skew: -0.000 Prob(JB): 0.891
Kurtosis: 2.040 Cond. No. 2.66e+19
==============================================================================
Notes:
[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.
[2] The smallest eigenvalue is 8.09e-32. This might indicate that there are
strong multicollinearity problems or that the design matrix is singular.
D:\Anaconda app\Lib\site-packages\statsmodels\stats\stattools.py:74: ValueWarning: omni_normtest is not valid with less than 8 observations; 6 samples were given.
Resource Allocation Optimization using ML¶
In [31]:
import os
import pandas as pd
import xgboost as xgb
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
# Define the file path
csv_file_path = 'aid_allocation.csv'
# Step 1: Create the CSV file with sample data if it doesn't exist
if not os.path.exists(csv_file_path):
# Sample data for aid allocation analysis
data = {
'population': [100000, 200000, 150000, 300000, 250000, 400000],
'conflict_intensity': [3, 5, 4, 6, 7, 8], # Conflict intensity score (1-10)
'infrastructure_damage': [100, 200, 150, 300, 250, 400], # Damage cost in thousands
'aid_allocation': [5000, 10000, 7000, 12000, 9000, 15000] # Aid allocation in dollars
}
# Create a DataFrame
df_sample = pd.DataFrame(data)
# Save to CSV
df_sample.to_csv(csv_file_path, index=False)
print(f"Created {csv_file_path} with sample data:\n{df_sample}")
else:
print(f"{csv_file_path} already exists.")
# Step 2: Load the dataset
df_aid = pd.read_csv(csv_file_path)
# Step 3: Define features and target variable
X = df_aid[['population', 'conflict_intensity', 'infrastructure_damage']] # Features
y = df_aid['aid_allocation'] # Target variable
# Step 4: Split dataset into train and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
# Step 5: Train XGBoost model
xgb_model = xgb.XGBRegressor(objective='reg:squarederror')
xgb_model.fit(X_train, y_train)
# Step 6: Predict and evaluate
y_pred = xgb_model.predict(X_test)
print(f'Mean Squared Error: {mean_squared_error(y_test, y_pred)}')
Created aid_allocation.csv with sample data: population conflict_intensity infrastructure_damage aid_allocation 0 100000 3 100 5000 1 200000 5 200 10000 2 150000 4 150 7000 3 300000 6 300 12000 4 250000 7 250 9000 5 400000 8 400 15000 Mean Squared Error: 6499998.046878815
Humanitarian Aid Impact Prediction:¶
In [35]:
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error
# Step 1: Create a sample dataset
data = {
'aid_amount': [10000, 20000, 15000, 30000, 25000],
'conflict_duration': [12, 8, 15, 24, 20], # in months
'region': ['Africa', 'Asia', 'Africa', 'Asia', 'Europe'],
'UN_involvement': [1, 0, 1, 0, 1] # 1 for involved, 0 for not involved
}
# Create a DataFrame
df = pd.DataFrame(data)
# Save the DataFrame to a CSV file
csv_file_path = 'humanitarian_aid_data.csv'
df.to_csv(csv_file_path, index=False)
# Display the DataFrame
print("Sample data saved to humanitarian_aid_data.csv:")
print(df)
# Step 2: Feature and target selection using the DataFrame
X = df[['aid_amount', 'region', 'UN_involvement']] # Use df instead of data
y = df['conflict_duration'] # Target variable (conflict duration)
# Convert categorical variable 'region' to dummy variables
X = pd.get_dummies(X, columns=['region'], drop_first=True)
# Step 3: Split the dataset into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
# Step 4: Train a Random Forest Regressor model
rf_model = RandomForestRegressor(n_estimators=100, random_state=42)
rf_model.fit(X_train, y_train)
# Step 5: Predict on the test set
y_pred = rf_model.predict(X_test)
# Step 6: Evaluate model performance
mse = mean_squared_error(y_test, y_pred)
print(f"Mean Squared Error: {mse}")
Sample data saved to humanitarian_aid_data.csv: aid_amount conflict_duration region UN_involvement 0 10000 12 Africa 1 1 20000 8 Asia 0 2 15000 15 Africa 1 3 30000 24 Asia 0 4 25000 20 Europe 1 Mean Squared Error: 121.22010000000003
Global Migration Patterns Post-Conflict¶
In [12]:
import geopandas as gpd
import pandas as pd
import os
import matplotlib.pyplot as plt
# Step 1: Create the 'migration_data.csv' file and save it to "E:\"
# Sample migration data with columns: origin, destination, longitude, latitude, and migration_count
data = {
'origin': ['USA', 'Russia', 'India', 'China', 'Germany'] * 4,
'destination': ['North Korea', 'Ukraine', 'Israel', 'Taiwan', 'France'] * 4,
'longitude': [-99.1332, 0.1276, 31.2357, 2.3522, 139.6917] * 4, # Approximate longitudes of cities
'latitude': [19.4326, 51.5074, 30.0444, 48.8566, 35.6895] * 4, # Approximate latitudes of cities
'migration_count': [1500, 2200, 1800, 2500, 3000] * 4
}
# Convert the dictionary to a DataFrame
migration_data = pd.DataFrame(data)
# Save the migration data to CSV format in "E:\" location
csv_path = "E:/migration_data.csv"
migration_data.to_csv(csv_path, index=False)
# Verify if the file has been saved
if os.path.exists(csv_path):
print(f"'migration_data.csv' has been successfully saved at {csv_path}")
else:
print(f"Failed to save 'migration_data.csv' at {csv_path}")
# Display the DataFrame
print("\nMigration Data DataFrame:\n", migration_data)
# Step 2: Download the 'naturalearth_lowres' dataset for offline use
# Load the naturalearth_lowres dataset using Geopandas
world = gpd.read_file(gpd.datasets.get_path('naturalearth_lowres'))
# Save the dataset to "E:\" location in shapefile format
shp_save_path = "E:/naturalearth_lowres.shp"
world.to_file(shp_save_path)
# Verify if the file has been saved
if os.path.exists(shp_save_path):
print(f"'naturalearth_lowres' dataset has been successfully saved at {shp_save_path}")
else:
print(f"Failed to save 'naturalearth_lowres' dataset at {shp_save_path}")
# Plot the world map using the dataset and display it
ax = world.plot(figsize=(10, 6))
ax.set_title("World Map (Natural Earth Low Resolution)")
plt.show()
'migration_data.csv' has been successfully saved at E:/migration_data.csv
Migration Data DataFrame:
origin destination longitude latitude migration_count
0 USA North Korea -99.1332 19.4326 1500
1 Russia Ukraine 0.1276 51.5074 2200
2 India Israel 31.2357 30.0444 1800
3 China Taiwan 2.3522 48.8566 2500
4 Germany France 139.6917 35.6895 3000
5 USA North Korea -99.1332 19.4326 1500
6 Russia Ukraine 0.1276 51.5074 2200
7 India Israel 31.2357 30.0444 1800
8 China Taiwan 2.3522 48.8566 2500
9 Germany France 139.6917 35.6895 3000
10 USA North Korea -99.1332 19.4326 1500
11 Russia Ukraine 0.1276 51.5074 2200
12 India Israel 31.2357 30.0444 1800
13 China Taiwan 2.3522 48.8566 2500
14 Germany France 139.6917 35.6895 3000
15 USA North Korea -99.1332 19.4326 1500
16 Russia Ukraine 0.1276 51.5074 2200
17 India Israel 31.2357 30.0444 1800
18 China Taiwan 2.3522 48.8566 2500
19 Germany France 139.6917 35.6895 3000
C:\Users\pradu\AppData\Local\Temp\ipykernel_11328\873316291.py:34: FutureWarning: The geopandas.dataset module is deprecated and will be removed in GeoPandas 1.0. You can get the original 'naturalearth_lowres' data from https://www.naturalearthdata.com/downloads/110m-cultural-vectors/.
'naturalearth_lowres' dataset has been successfully saved at E:/naturalearth_lowres.shp
In [11]:
import geopandas as gpd
import os
# Load the naturalearth_lowres dataset
world = gpd.read_file(gpd.datasets.get_path('naturalearth_lowres'))
# Define the file path to save the dataset in 'E:\'
save_path = "E:/naturalearth_lowres.shp"
# Save the dataset as a shapefile
world.to_file(save_path)
# Verify if the file has been saved
if os.path.exists(save_path):
print(f"The dataset has been successfully saved at {save_path}")
else:
print(f"Failed to save the dataset at {save_path}")
# Display the world map using the dataset
ax = world.plot(figsize=(10, 6))
ax.set_title("World Map (Natural Earth Low Resolution)")
plt.show()
C:\Users\pradu\AppData\Local\Temp\ipykernel_11328\3695609172.py:5: FutureWarning: The geopandas.dataset module is deprecated and will be removed in GeoPandas 1.0. You can get the original 'naturalearth_lowres' data from https://www.naturalearthdata.com/downloads/110m-cultural-vectors/.
The dataset has been successfully saved at E:/naturalearth_lowres.shp
Custom Machine Learning Model for Conflict Prediction¶
In [44]:
import pandas as pd
import xgboost as xgb
import tensorflow as tf
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import VotingClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
# Load data
data = pd.read_csv(r'E:\conflict_data.csv')
# Display the complete data
print("Complete Data from CSV:")
print(data)
# Encode categorical columns
label_encoder = LabelEncoder()
data['Conflict_Region'] = label_encoder.fit_transform(data['Conflict_Region']) # Encode the target column
for col in data.select_dtypes(include=['object']).columns: # Encode all other object columns
data[col] = label_encoder.fit_transform(data[col])
# Define features and target
X = data.drop('Conflict_Region', axis=1)
y = data['Conflict_Region']
# Standardize features
scaler = StandardScaler()
X = scaler.fit_transform(X)
# Split data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
# Initialize models
rf_model = RandomForestClassifier(n_estimators=100)
xgb_model = xgb.XGBClassifier()
# LSTM model expects 3D input: [samples, timesteps, features]
# Reshape X_train and X_test for LSTM
X_train_lstm = X_train.reshape((X_train.shape[0], X_train.shape[1], 1))
X_test_lstm = X_test.reshape((X_test.shape[0], X_test.shape[1], 1))
lstm_model = tf.keras.Sequential([
tf.keras.layers.Input(shape=(X_train.shape[1], 1)), # Input layer using Input() object
tf.keras.layers.LSTM(50, return_sequences=False), # LSTM layer
tf.keras.layers.Dense(1) # Output layer
])
# Compile LSTM model
lstm_model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
lstm_model.fit(X_train_lstm, y_train, epochs=10, batch_size=32, verbose=0)
# Ensemble model (LSTM excluded here since VotingClassifier only works with traditional ML models)
ensemble_model = VotingClassifier(estimators=[('rf', rf_model), ('xgb', xgb_model)], voting='hard')
ensemble_model.fit(X_train, y_train)
# Predict and evaluate
y_pred = ensemble_model.predict(X_test)
print(f'Ensemble Model Accuracy: {accuracy_score(y_test, y_pred)}')
# Predict and evaluate LSTM model
y_pred_lstm = lstm_model.predict(X_test_lstm)
y_pred_lstm_classes = (y_pred_lstm > 0.5).astype(int) # Binarizing the output for binary classification
print(f'LSTM Model Accuracy: {accuracy_score(y_test, y_pred_lstm_classes)}')
Complete Data from CSV:
Country Conflict_Region Conflict_Type Latitude Longitude \
0 USA Mid North America Tension 37.0902 -95.7129
1 Russia Eastern Europe War 61.5240 105.3188
2 India South Asia Tension 20.5937 78.9629
3 China East Asia Potential Conflict 35.8617 104.1954
4 Ukraine Eastern Europe War 48.3794 31.1656
5 Israel Middle East War 31.0461 34.8516
6 Palestine Middle East War 31.9522 35.2332
7 France Western Europe Potential Conflict 46.6034 1.8883
8 Germany Western Europe Tension 51.1657 10.4515
9 Pakistan South West Asia Extreme Tension 30.3753 69.3451
10 Taiwan East Asia Potential Conflict 23.6978 121.0200
Altitude Conflict_Intensity Deaths Economic_Impact_Billion \
0 760 10 34952 83.810799
1 600 20 19327 71.334938
2 160 30 42936 91.256441
3 1840 25 23452 70.652218
4 175 15 8489 12.682348
5 508 18 13351 62.479122
6 795 5 12032 99.574426
7 375 12 25615 58.227126
8 263 22 40647 75.929998
9 900 16 40744 36.684055
10 1150 14 49027 66.481821
Environmental_Damage_Index UN_Interventions Total_Population \
0 8.855605 1 331002651
1 8.692949 3 145912025
2 5.458306 1 1380004385
3 2.503946 1 1439323776
4 9.768354 1 43733762
5 5.421264 1 8655535
6 4.388213 3 5000000
7 5.902327 2 65273511
8 8.961130 4 83783942
9 9.695679 1 225199937
10 9.098252 3 23816775
Male_Population Female_Population
0 162000000 169000000
1 67000000 78900000
2 705000000 675000000
3 724000000 715000000
4 22000000 21700000
5 4300000 4350000
6 2500000 2500000
7 32000000 33200000
8 41000000 42700000
9 113000000 112000000
10 12000000 11800000
Ensemble Model Accuracy: 0.3333333333333333
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 246ms/step
LSTM Model Accuracy: 0.0
Visualizing Trade Disruptions in Conflict Zones¶
In [17]:
# Trade Disruptions in Conflict Zones using Plotly Sankey Diagram
import plotly.graph_objects as go
# Define Sankey diagram nodes and links for trade before and after conflict
labels = ["USA", "Russia", "India", "China", "Middle East"]
sources = [0, 1, 2, 2, 3] # Pre-conflict trade sources
targets = [3, 3, 3, 4, 4] # Post-conflict trade disruptions
values = [100, 50, 75, 20, 30] # Trade volumes
# Create the Sankey diagram
fig = go.Figure(go.Sankey(
node=dict(
pad=15,
thickness=20,
line=dict(color="black", width=0.5),
label=labels
),
link=dict(
source=sources,
target=targets,
value=values
)
))
fig.update_layout(title_text="Trade Disruptions Due to Conflict", font_size=10)
fig.show()
Data Privacy Considerations in Conflict Analysis¶
In [40]:
import pandas as pd
import plotly.express as px
# Sample data for Global Conflict Mapping
data = {
'Country': [
'United States', 'Russia', 'China', 'India', 'Germany', 'France',
'United Kingdom', 'Brazil', 'Japan', 'South Africa', 'Canada',
'Australia', 'Mexico', 'Italy', 'South Korea', 'Argentina',
'Turkey', 'Saudi Arabia', 'Spain', 'Netherlands'
],
'Capital': [
'Washington D.C.', 'Moscow', 'Beijing', 'New Delhi', 'Berlin',
'Paris', 'London', 'Brasília', 'Tokyo', 'Pretoria', 'Ottawa',
'Canberra', 'Mexico City', 'Rome', 'Seoul', 'Buenos Aires',
'Ankara', 'Riyadh', 'Madrid', 'Amsterdam'
],
'Conflict_Severity': [5, 7, 4, 6, 3, 4, 5, 2, 6, 4, 3, 5, 6, 2, 5, 3, 7, 4, 3, 2],
'Latitude': [
37.0902, 61.5240, 35.8617, 20.5937, 51.1657, 46.6034,
55.3781, -14.2350, 36.2048, -30.5595, 56.1304, -25.2744,
23.6345, 41.8719, 35.9078, -38.4161, 38.9968, 40.4637,
39.9334, 52.1326
],
'Longitude': [
-95.7129, 105.3188, 104.1954, 78.9629, 10.4515, 1.8883,
-3.4360, -51.9253, 138.2529, 22.9375, -106.3468, 133.7751,
-102.5528, 12.5674, 127.7669, 127.7669, 45.0943, 39.0742,
-3.7038, 5.2913
],
'Data_Confidentiality': [
'High', 'Medium', 'Medium', 'High', 'Low', 'Medium',
'High', 'Low', 'High', 'Medium', 'High', 'Medium',
'Medium', 'Low', 'Medium', 'Low', 'High', 'High',
'Low', 'Medium'
]
}
# Create DataFrame
df = pd.DataFrame(data)
# Display the DataFrame in the output
print("Global Conflict Data:")
print(df)
# Create a political map using Plotly
fig = px.choropleth(
df,
locations='Country', # We use country names for locations
locationmode='country names', # Use country names
color='Conflict_Severity', # Color based on conflict severity
hover_name='Country', # Display country name on hover
hover_data=['Capital', 'Data_Confidentiality'], # Additional hover info
title='Global Political Map with Conflict Severity',
color_continuous_scale=px.colors.sequential.Plasma # Color scale
)
# Update layout to center the map and allow resizing
fig.update_geos(
visible=False, # Hide base geo features
showcountries=True, # Show country boundaries
countrycolor="Black" # Color for country borders
)
# Center the title and adjust size
fig.update_layout(
autosize=True,
width=1500,
height=800,
title_x=0.5, # Center the title
margin=dict(l=50, r=50, t=50, b=50), # Set margins for centering
)
# Show the figure
fig.show()
Global Conflict Data:
Country Capital Conflict_Severity Latitude Longitude \
0 United States Washington D.C. 5 37.0902 -95.7129
1 Russia Moscow 7 61.5240 105.3188
2 China Beijing 4 35.8617 104.1954
3 India New Delhi 6 20.5937 78.9629
4 Germany Berlin 3 51.1657 10.4515
5 France Paris 4 46.6034 1.8883
6 United Kingdom London 5 55.3781 -3.4360
7 Brazil Brasília 2 -14.2350 -51.9253
8 Japan Tokyo 6 36.2048 138.2529
9 South Africa Pretoria 4 -30.5595 22.9375
10 Canada Ottawa 3 56.1304 -106.3468
11 Australia Canberra 5 -25.2744 133.7751
12 Mexico Mexico City 6 23.6345 -102.5528
13 Italy Rome 2 41.8719 12.5674
14 South Korea Seoul 5 35.9078 127.7669
15 Argentina Buenos Aires 3 -38.4161 127.7669
16 Turkey Ankara 7 38.9968 45.0943
17 Saudi Arabia Riyadh 4 40.4637 39.0742
18 Spain Madrid 3 39.9334 -3.7038
19 Netherlands Amsterdam 2 52.1326 5.2913
Data_Confidentiality
0 High
1 Medium
2 Medium
3 High
4 Low
5 Medium
6 High
7 Low
8 High
9 Medium
10 High
11 Medium
12 Medium
13 Low
14 Medium
15 Low
16 High
17 High
18 Low
19 Medium
Global Conflict Severity Ranking using Machine Learning¶
In [25]:
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
# Load conflict dataset
data = pd.read_csv(r'E:\conflict_data.csv')
# Encode categorical variables
data = pd.get_dummies(data, columns=['Conflict_Type'], drop_first=True)
# Selecting features and target variable
X = data[['UN_Interventions', 'Economic_Impact_Billion'] + list(data.columns[data.columns.str.startswith('Conflict_Type')])]
y = data['Conflict_Intensity']
# Split the dataset
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
# Normalize the features
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
# Train Random Forest Classifier
rf = RandomForestClassifier(n_estimators=100)
rf.fit(X_train, y_train)
# Predict and rank conflicts
y_pred = rf.predict(X_test)
# Display predictions
print(y_pred)
[15 30 15]
Conflict Resolution Techniques Visualization¶
In [27]:
import plotly.graph_objects as go
# Data: Mediation, sanctions, peacekeeping effectiveness
resolution_techniques = ['Mediation', 'Sanctions', 'Peacekeeping']
success_rates = [70, 55, 80] # Example success rates
# Radar chart to visualize effectiveness of conflict resolution techniques
fig = go.Figure(data=go.Scatterpolar(
r=success_rates,
theta=resolution_techniques,
fill='toself'
))
fig.update_layout(
polar=dict(
radialaxis=dict(visible=True, range=[0, 100])
),
showlegend=False,
title='Conflict Resolution Techniques Effectiveness',
width=700, # Set the desired width
height=700 # Set the desired height
)
fig.show()
Prediction of Military Movements using Geospatial Data¶
In [2]:
import pandas as pd
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, LSTM
from sklearn.preprocessing import OneHotEncoder
import plotly.express as px # Make sure to import Plotly Express
# Create a sample dataset for military movements
data = {
'latitude': [34.05, 34.56, 35.23, 36.11, 37.44, 38.22, 39.01, 40.35, 41.12, 42.48,
43.07, 44.16, 45.09, 46.33, 47.89, 48.12, 49.47, 50.03, 51.11, 52.44],
'longitude': [-118.25, -117.65, -116.67, -115.98, -114.74, -113.87, -112.65, -111.12, -110.34, -109.45,
-108.95, -107.12, -106.53, -105.78, -104.92, -103.56, -102.78, -101.34, -100.67, -99.45],
'terrain': ['mountain', 'desert', 'forest', 'plain', 'urban', 'hilly', 'coastal', 'flat', 'swamp', 'rural',
'mountain', 'desert', 'forest', 'plain', 'urban', 'hilly', 'coastal', 'flat', 'swamp', 'rural'],
'weather': ['sunny', 'cloudy', 'rainy', 'sunny', 'foggy', 'stormy', 'sunny', 'cloudy', 'rainy', 'sunny',
'foggy', 'stormy', 'sunny', 'cloudy', 'rainy', 'sunny', 'foggy', 'stormy', 'sunny', 'cloudy'],
'future_movement': [1, 0, 1, 1, 0, 0, 1, 1, 0, 1,
0, 1, 0, 0, 1, 1, 1, 0, 0, 1],
'unit_type': ['infantry', 'armored', 'airborne', 'infantry', 'naval', 'armored', 'airborne', 'infantry', 'naval', 'armored',
'airborne', 'infantry', 'naval', 'armored', 'airborne', 'infantry', 'naval', 'armored', 'airborne', 'infantry'],
'logistics_support': [5, 3, 4, 6, 2, 5, 7, 4, 3, 6,
2, 5, 4, 6, 3, 4, 5, 2, 6, 5],
'mission_type': ['reconnaissance', 'combat', 'support', 'reconnaissance', 'combat', 'support', 'reconnaissance', 'combat', 'support', 'reconnaissance',
'combat', 'support', 'reconnaissance', 'combat', 'support', 'reconnaissance', 'combat', 'support', 'reconnaissance', 'combat'],
'time_of_day': ['morning', 'afternoon', 'evening', 'morning', 'afternoon', 'evening', 'morning', 'afternoon', 'evening', 'morning',
'afternoon', 'evening', 'morning', 'afternoon', 'evening', 'morning', 'afternoon', 'evening', 'morning', 'afternoon'],
}
# Create a DataFrame
military_movements_df = pd.DataFrame(data)
# Save the DataFrame to a CSV file
csv_file_path = r'E:\military_movements.csv'
military_movements_df.to_csv(csv_file_path, index=False)
# Load the data for LSTM
data = pd.read_csv(r'E:\military_movements.csv')
# Preprocess categorical data using One-Hot Encoding
encoder = OneHotEncoder(sparse_output=False) # Use sparse_output instead of sparse
encoded_features = encoder.fit_transform(data[['terrain', 'weather', 'unit_type', 'mission_type', 'time_of_day']])
encoded_feature_names = encoder.get_feature_names_out(['terrain', 'weather', 'unit_type', 'mission_type', 'time_of_day'])
# Create a new DataFrame with the encoded features
encoded_df = pd.DataFrame(encoded_features, columns=encoded_feature_names)
# Combine the numeric and encoded features
X = pd.concat([data[['latitude', 'longitude', 'logistics_support']], encoded_df], axis=1).values.reshape(-1, 1, encoded_df.shape[1] + 3)
y = data['future_movement'].values
# Building the LSTM model
model = Sequential()
model.add(LSTM(50, return_sequences=True, input_shape=(1, X.shape[2])))
model.add(LSTM(50, return_sequences=False))
model.add(Dense(1, activation='linear'))
# Compile and fit the model
model.compile(optimizer='adam', loss='mse')
model.fit(X, y, epochs=10, batch_size=32)
# Create a sunburst chart
fig = px.sunburst(data, path=['latitude', 'longitude', 'unit_type', 'terrain', 'weather', 'mission_type', 'logistics_support', 'time_of_day'], values='future_movement', title="Military Movements Sunburst Chart")
# Update the layout to adjust the size of the chart
fig.update_layout(width=800, height=800) # Change width and height as desired
# Show the figure
fig.show()
D:\Anaconda app\Lib\site-packages\keras\src\layers\rnn\rnn.py:204: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
Epoch 1/10 1/1 ━━━━━━━━━━━━━━━━━━━━ 10s 10s/step - loss: 0.5232 Epoch 2/10 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 79ms/step - loss: 0.4967 Epoch 3/10 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 79ms/step - loss: 0.4715 Epoch 4/10 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 91ms/step - loss: 0.4476 Epoch 5/10 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 79ms/step - loss: 0.4249 Epoch 6/10 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 79ms/step - loss: 0.4033 Epoch 7/10 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 79ms/step - loss: 0.3828 Epoch 8/10 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 79ms/step - loss: 0.3635 Epoch 9/10 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 79ms/step - loss: 0.3454 Epoch 10/10 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 89ms/step - loss: 0.3286
Topic Modeling of UN Speeches using Gensim¶
In [35]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from gensim import corpora
from gensim.models import LdaModel
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
import nltk
# Uncomment the following lines to download NLTK resources if needed
# nltk.download('punkt')
# nltk.download('stopwords')
# Sample dataset with country statements
data = {
'Country': [
'United States', 'Russia', 'China', 'India', 'Germany', 'France',
'United Kingdom', 'Brazil', 'Japan', 'South Africa', 'Canada',
'Australia', 'Mexico', 'Italy', 'South Korea', 'Argentina',
'Turkey', 'Saudi Arabia', 'Spain', 'Netherlands'
],
'Statement': [
"We must work together to promote peace and security.",
"The situation in the region requires immediate attention.",
"Collaboration is key to address global challenges.",
"Our commitment to climate action must be reinforced.",
"Human rights should be at the forefront of our discussions.",
"We support the sovereignty of nations in conflict.",
"It is time to enhance our diplomatic efforts.",
"The humanitarian crisis must be addressed urgently.",
"Economic cooperation can lead to lasting peace.",
"Disarmament is crucial for global stability.",
"We advocate for sustainable development goals.",
"Terrorism poses a significant threat to our societies.",
"We urge for a peaceful resolution to disputes.",
"Refugee rights must be protected.",
"The role of the UN is vital in conflict resolution.",
"Cultural dialogue can foster understanding.",
"We must prioritize health and education.",
"Food security is essential for peace.",
"Corruption undermines democracy and stability.",
"Investments in technology can enhance peacekeeping efforts.",
]
}
# Check lengths of columns to ensure they are the same
if len(data['Country']) != len(data['Statement']):
print(f"Length mismatch: {len(data['Country'])} countries vs {len(data['Statement'])} statements")
else:
# Create DataFrame if lengths are consistent
df = pd.DataFrame(data)
# Preprocessing text
stop_words = set(stopwords.words('english'))
df['Processed_Statements'] = df['Statement'].apply(lambda x: [
word for word in word_tokenize(x.lower()) if word.isalnum() and word not in stop_words
])
# Create a dictionary and corpus for Gensim
dictionary = corpora.Dictionary(df['Processed_Statements'])
corpus = [dictionary.doc2bow(text) for text in df['Processed_Statements']]
# Build LDA model
num_topics = 3
lda_model = LdaModel(corpus, num_topics=num_topics, id2word=dictionary, passes=15)
# Display the topics
print("Topics found in UN speeches:")
for idx, topic in lda_model.print_topics(-1):
print(f"Topic {idx + 1}: {topic}")
# Get topic distribution for each statement
topic_distribution = lda_model.get_document_topics(corpus)
# Create a DataFrame for visualization
topic_df = pd.DataFrame([[dist[1] for dist in doc] for doc in topic_distribution],
columns=[f'Topic {i + 1}' for i in range(num_topics)])
topic_df['Country'] = df['Country']
# Fill NaN values with 0 for plotting
topic_df = topic_df.fillna(0)
# Convert topic distribution to numeric
topic_df[[f'Topic {i + 1}' for i in range(num_topics)]] = topic_df[[f'Topic {i + 1}' for i in range(num_topics)]].apply(pd.to_numeric)
# Remove rows where all topic values are zero
topic_df = topic_df[(topic_df[[f'Topic {i + 1}' for i in range(num_topics)]] != 0).any(axis=1)]
# Plotting the topic distribution for each country using area plot
plt.figure(figsize=(12, 6))
for i in range(num_topics):
plt.fill_between(topic_df['Country'], topic_df[f'Topic {i + 1}'], label=f'Topic {i + 1}', alpha=0.5)
plt.xlabel('Country')
plt.ylabel('Topic Distribution')
plt.title('\n\nTopic Distribution of UN Speeches by Country (Area Plot)')
plt.xticks(rotation=45)
plt.legend()
plt.grid()
plt.tight_layout()
plt.show()
Topics found in UN speeches: Topic 1: 0.060*"must" + 0.041*"peace" + 0.041*"security" + 0.023*"work" + 0.023*"together" + 0.023*"promote" + 0.023*"societies" + 0.023*"poses" + 0.023*"terrorism" + 0.023*"threat" Topic 2: 0.045*"enhance" + 0.045*"efforts" + 0.026*"resolution" + 0.026*"conflict" + 0.026*"stability" + 0.025*"rights" + 0.025*"peacekeeping" + 0.025*"technology" + 0.025*"investments" + 0.025*"role" Topic 3: 0.024*"global" + 0.024*"rights" + 0.024*"immediate" + 0.024*"region" + 0.024*"attention" + 0.024*"situation" + 0.024*"requires" + 0.024*"challenges" + 0.024*"key" + 0.024*"collaboration"
Analysis of UN Voting Patterns in Conflict-related Resolutions¶
In [7]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
# Create a sample dataset for UN voting patterns
data = {
'country_1': np.random.choice(['United States', 'China', 'Russia', 'India', 'Germany', 'France', 'United Kingdom', 'Brazil', 'Japan', 'South Africa'], 20),
'country_2': np.random.choice(['Argentina', 'Italy', 'Australia', 'Canada', 'Spain', 'Mexico', 'South Korea', 'Indonesia', 'Turkey', 'Egypt'], 20),
'alignment_score': np.random.uniform(0, 1, 20), # Random alignment scores between 0 and 1
'resolution_type': np.random.choice(['Resolution on Climate Change', 'Resolution on Human Rights', 'Resolution on Global Health', 'Resolution on Peacekeeping', 'Resolution on Disarmament'], 20),
'date': pd.date_range(start='2023-01-01', periods=20, freq='M'),
'vote_type': np.random.choice(['Yes', 'No', 'Abstain'], 20),
'region': np.random.choice(['North America', 'Europe', 'Asia', 'Africa', 'Latin America'], 20),
'supporting_countries': np.random.randint(1, 10, 20),
'opposing_countries': np.random.randint(1, 10, 20),
'neutral_countries': np.random.randint(1, 10, 20),
}
# Create a DataFrame
voting_patterns_df = pd.DataFrame(data)
# Save the DataFrame to a CSV file
csv_file_path = r'E:\un_voting_patterns.csv'
voting_patterns_df.to_csv(csv_file_path, index=False)
# Display the DataFrame
print(voting_patterns_df)
# Visualization: Timeline Chart of Alignment Scores Over Time
# You might want to summarize or aggregate data if necessary, but here’s a basic timeline:
plt.figure(figsize=(12, 6))
sns.lineplot(data=voting_patterns_df, x='date', y='alignment_score', hue='vote_type', marker='o')
plt.title('Timeline of UN Voting Alignment Scores')
plt.xlabel('Date')
plt.ylabel('Alignment Score')
plt.xticks(rotation=45)
plt.legend(title='Vote Type')
plt.tight_layout()
plt.show()
# Alternative Visualization: Sunburst Chart
# Create a sunburst chart using Plotly
sunburst_fig = px.sunburst(voting_patterns_df,
path=['country_1', 'country_2', 'resolution_type', 'date', 'vote_type', 'region', 'supporting_countries', 'opposing_countries', 'neutral_countries' ],
values='alignment_score',
title='UN Voting Patterns Sunburst Chart')
# Update the layout to adjust the size of the chart
sunburst_fig.update_layout(width=800, height=1000) # Change width and height as desired
sunburst_fig.show()
C:\Users\pradu\AppData\Local\Temp\ipykernel_864\2598653280.py:13: FutureWarning: 'M' is deprecated and will be removed in a future version, please use 'ME' instead. 'date': pd.date_range(start='2023-01-01', periods=20, freq='M'),
country_1 country_2 alignment_score resolution_type \
0 France Indonesia 0.542508 Resolution on Human Rights
1 United States Australia 0.223986 Resolution on Global Health
2 United Kingdom Indonesia 0.894507 Resolution on Human Rights
3 Russia Egypt 0.279835 Resolution on Global Health
4 India Australia 0.762021 Resolution on Peacekeeping
5 Brazil Canada 0.525010 Resolution on Climate Change
6 United States Canada 0.191354 Resolution on Disarmament
7 South Africa Argentina 0.361159 Resolution on Climate Change
8 Germany Indonesia 0.192728 Resolution on Disarmament
9 United Kingdom Indonesia 0.637908 Resolution on Global Health
10 Germany Egypt 0.402280 Resolution on Disarmament
11 Japan Mexico 0.153011 Resolution on Global Health
12 India Mexico 0.477455 Resolution on Climate Change
13 United Kingdom Spain 0.984106 Resolution on Global Health
14 South Africa Mexico 0.520863 Resolution on Global Health
15 Germany Spain 0.913589 Resolution on Peacekeeping
16 South Africa Mexico 0.828063 Resolution on Global Health
17 China Egypt 0.142977 Resolution on Climate Change
18 France Mexico 0.467613 Resolution on Disarmament
19 France Argentina 0.882445 Resolution on Human Rights
date vote_type region supporting_countries \
0 2023-01-31 Abstain Latin America 5
1 2023-02-28 No Asia 1
2 2023-03-31 No Africa 9
3 2023-04-30 No North America 4
4 2023-05-31 Abstain North America 5
5 2023-06-30 No Africa 2
6 2023-07-31 Yes North America 4
7 2023-08-31 No Asia 2
8 2023-09-30 No Europe 8
9 2023-10-31 Yes North America 7
10 2023-11-30 Yes North America 6
11 2023-12-31 Yes Latin America 6
12 2024-01-31 No Africa 8
13 2024-02-29 Abstain Africa 6
14 2024-03-31 Yes North America 2
15 2024-04-30 Abstain Latin America 5
16 2024-05-31 Yes North America 9
17 2024-06-30 No Latin America 4
18 2024-07-31 No North America 2
19 2024-08-31 Abstain Africa 4
opposing_countries neutral_countries
0 5 6
1 6 4
2 6 9
3 1 7
4 9 9
5 1 2
6 9 5
7 1 9
8 3 4
9 1 5
10 9 1
11 4 1
12 1 3
13 8 9
14 6 1
15 1 6
16 1 4
17 2 3
18 5 3
19 7 5
Predictive Maintenance for UN Peacekeeping Missions¶
In [25]:
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error
import matplotlib.pyplot as plt
# Load historical conflict data
conflict_data = pd.read_csv(r'E:\conflict_data.csv')
# Display the actual column names to identify any discrepancies
print("Columns in the DataFrame:", conflict_data.columns.tolist())
# Optionally, strip whitespace from the column names
conflict_data.columns = conflict_data.columns.str.strip()
# Check data types of the columns
print("Data types in the DataFrame:")
print(conflict_data.dtypes)
# Define the new values for the columns (if they have not been added already)
peacekeeper_count_values = [10, 20, 15, 25, 30, 12, 22, 18, 14, 28, 16] # Example values
duration_values = [5, 3, 7, 2, 4, 6, 8, 1, 10, 9, 11] # Example values
# Add new columns to the DataFrame if not already present
conflict_data['Peacekeeper_Count'] = peacekeeper_count_values
conflict_data['Duration'] = duration_values
# Ensure the target variable exists and is numeric
if 'Peacekeeper_Count' not in conflict_data.columns:
raise ValueError("Target variable 'Peacekeeper_Count' not found in the DataFrame.")
# Convert relevant columns to numeric, if necessary
conflict_data['Peacekeeper_Count'] = pd.to_numeric(conflict_data['Peacekeeper_Count'], errors='coerce')
conflict_data['Duration'] = pd.to_numeric(conflict_data['Duration'], errors='coerce')
# Check for any NaN values that might result from coercion
print("Checking for NaN values in DataFrame:")
print(conflict_data.isna().sum())
# Drop rows with NaN values (if any)
conflict_data.dropna(inplace=True)
# Define features and target variable
try:
X = conflict_data[['Conflict_Region', 'Conflict_Intensity', 'Peacekeeper_Count', 'Duration']]
y = conflict_data['Peacekeeper_Count'] # Target variable: resources for maintenance
except KeyError as e:
print(f"KeyError: {e}. Please check the column names in the CSV file.")
# Convert categorical data (if any) to numeric using one-hot encoding
X = pd.get_dummies(X, drop_first=True)
# Train-test split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
# Predictive model (Random Forest Regressor)
model = RandomForestRegressor(n_estimators=100, random_state=42)
model.fit(X_train, y_train)
# Prediction
y_pred = model.predict(X_test)
# Evaluate model
mse = mean_squared_error(y_test, y_pred)
print(f"Mean Squared Error: {mse}")
# Plot predictions vs actual using wireline plot
plt.plot(y_test.values, label="Actual Resources", marker='o', linestyle='-', color='blue')
plt.plot(y_pred, label="Predicted Resources", marker='o', linestyle='-', color='orange')
plt.xlabel("Test Sample Index")
plt.ylabel("Resources")
plt.title("Actual vs Predicted Resources for UN Peacekeeping")
plt.legend()
plt.grid()
plt.show()
Columns in the DataFrame: ['Country', 'Conflict_Region', 'Conflict_Type', 'Latitude', 'Longitude', 'Altitude', 'Conflict_Intensity', 'Deaths', 'Economic_Impact_Billion', 'Environmental_Damage_Index', 'UN_Interventions', 'Total_Population', 'Male_Population', 'Female_Population'] Data types in the DataFrame: Country object Conflict_Region object Conflict_Type object Latitude float64 Longitude float64 Altitude int64 Conflict_Intensity int64 Deaths int64 Economic_Impact_Billion float64 Environmental_Damage_Index float64 UN_Interventions int64 Total_Population int64 Male_Population int64 Female_Population int64 dtype: object Checking for NaN values in DataFrame: Country 0 Conflict_Region 0 Conflict_Type 0 Latitude 0 Longitude 0 Altitude 0 Conflict_Intensity 0 Deaths 0 Economic_Impact_Billion 0 Environmental_Damage_Index 0 UN_Interventions 0 Total_Population 0 Male_Population 0 Female_Population 0 Peacekeeper_Count 0 Duration 0 dtype: int64 Mean Squared Error: 20.03086666666667
Interactive 3D Visualization of Sunburst Chart of Conflict History¶
In [53]:
import pandas as pd
import plotly.express as px
# Create a sample dataset with ensured length consistency
data = {
'date': pd.date_range(start='2020-01-01', periods=20, freq='ME'), # Updated 'M' to 'ME'
'latitude': [34.05, 36.16, 40.71, 51.51, 35.68, 48.85, 55.75, 30.44, 35.68, 55.00,
28.61, 38.90, 37.77, 34.81, 40.43, 52.52, 39.90, 41.87, 43.65, 41.32],
'longitude': [-118.24, -115.15, -74.01, -0.13, 139.76, 2.35, 37.62, -97.18, 139.76, 38.00,
-81.99, -77.04, -122.42, -120.48, -3.40, 13.41, -75.70, -87.62, -79.38, -74.93],
'intensity': [1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 3, 4, 2, 1, 5, 3, 2, 1, 5, 4],
'conflict_type': ['War', 'Civil', 'War', 'War', 'Civil', 'Civil', 'War', 'Civil', 'War', 'Civil',
'Civil', 'War', 'War', 'Civil', 'Civil', 'War', 'Civil', 'War', 'War', 'Civil', 'War'],
'casualties': [100, 200, 150, 300, 250, 400, 350, 600, 450, 500,
150, 200, 300, 250, 500, 400, 350, 600, 300, 200],
'duration_months': [6, 12, 8, 10, 5, 2, 7, 4, 9, 3,
8, 6, 10, 12, 7, 8, 5, 3, 4, 9],
'peacekeepers': [10, 20, 15, 30, 25, 12, 18, 22, 30, 15,
20, 10, 25, 15, 10, 12, 22, 20, 25, 30],
'resources_needed': [1000, 2000, 1500, 3000, 2500, 1200, 1800, 2200, 2400, 1500,
1700, 1900, 2300, 2800, 2900, 3100, 3300, 2500, 2700, 2900],
'region': ['North America', 'North America', 'North America', 'Europe', 'Asia', 'Europe', 'Asia', 'Africa',
'Asia', 'Asia', 'North America', 'North America', 'North America', 'Europe', 'Europe', 'Europe',
'Asia', 'Asia', 'Africa', 'Africa', 'North America']
}
# Find the minimum length of all lists in the data dictionary
min_length = min(len(data[key]) for key in data)
# Truncate all lists to the minimum length
for key in data:
data[key] = data[key][:min_length]
# Now we create the DataFrame
conflict_history = pd.DataFrame(data)
# Save to CSV in the specified location
csv_file_path = r'E:\conflict_history.csv'
conflict_history.to_csv(csv_file_path, index=False)
# Display the DataFrame
print("Conflict History DataFrame:")
print(conflict_history)
# Prepare data for the sunburst chart
# Create a new DataFrame for the sunburst chart
sunburst_data = conflict_history.groupby(['region', 'conflict_type', 'date']).sum().reset_index()
# Create a 3D Sunburst Chart
fig = px.sunburst(
sunburst_data,
path=['region', 'conflict_type', 'date'],
values='casualties', # Use 'casualties' as the value for the sunburst
title='3D Sunburst Chart of Conflict History',
height=800, # Set the desired height (in pixels)
width=800 # Set the desired width (in pixels)
)
# Show the plot
fig.show()
Conflict History DataFrame:
date latitude longitude intensity conflict_type casualties \
0 2020-01-31 34.05 -118.24 1 War 100
1 2020-02-29 36.16 -115.15 2 Civil 200
2 2020-03-31 40.71 -74.01 3 War 150
3 2020-04-30 51.51 -0.13 4 War 300
4 2020-05-31 35.68 139.76 5 Civil 250
5 2020-06-30 48.85 2.35 1 Civil 400
6 2020-07-31 55.75 37.62 2 War 350
7 2020-08-31 30.44 -97.18 3 Civil 600
8 2020-09-30 35.68 139.76 4 War 450
9 2020-10-31 55.00 38.00 5 Civil 500
10 2020-11-30 28.61 -81.99 3 Civil 150
11 2020-12-31 38.90 -77.04 4 War 200
12 2021-01-31 37.77 -122.42 2 War 300
13 2021-02-28 34.81 -120.48 1 Civil 250
14 2021-03-31 40.43 -3.40 5 Civil 500
15 2021-04-30 52.52 13.41 3 War 400
16 2021-05-31 39.90 -75.70 2 Civil 350
17 2021-06-30 41.87 -87.62 1 War 600
18 2021-07-31 43.65 -79.38 5 War 300
19 2021-08-31 41.32 -74.93 4 Civil 200
duration_months peacekeepers resources_needed region
0 6 10 1000 North America
1 12 20 2000 North America
2 8 15 1500 North America
3 10 30 3000 Europe
4 5 25 2500 Asia
5 2 12 1200 Europe
6 7 18 1800 Asia
7 4 22 2200 Africa
8 9 30 2400 Asia
9 3 15 1500 Asia
10 8 20 1700 North America
11 6 10 1900 North America
12 10 25 2300 North America
13 12 15 2800 Europe
14 7 10 2900 Europe
15 8 12 3100 Europe
16 5 22 3300 Asia
17 3 20 2500 Asia
18 4 25 2700 Africa
19 9 30 2900 Africa
Economic Sanction Efficiency Modeling¶
In [54]:
import pandas as pd
import numpy as np
import xgboost as xgb
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
import matplotlib.pyplot as plt
# Create a sample dataset with approx 20 rows and 10 columns
data = {
'sanctions_imposed': np.random.randint(0, 100, size=20),
'trade_reduction': np.random.uniform(0, 50, size=20),
'GDP_loss': np.random.uniform(0, 10, size=20),
'international_support': np.random.randint(0, 100, size=20),
'conflict_severity': np.random.randint(0, 2, size=20), # Binary: 1 = severe conflict, 0 = reduced conflict
'political_stability': np.random.uniform(0, 1, size=20),
'military_spending': np.random.uniform(0, 100, size=20),
'population_displacement': np.random.randint(0, 10000, size=20),
'foreign_investment': np.random.uniform(0, 100, size=20),
'inflation_rate': np.random.uniform(0, 20, size=20)
}
# Convert to DataFrame
sanctions_data = pd.DataFrame(data)
# Save to 'E:\sanctions_data.csv' (ensure the path exists on your machine)
file_path = r'E:\sanctions_data.csv'
sanctions_data.to_csv(file_path, index=False)
# Display the DataFrame
print("Generated DataFrame:")
print(sanctions_data)
# Continue with your model
# Features and target variable
X = sanctions_data[['sanctions_imposed', 'trade_reduction', 'GDP_loss', 'international_support']]
y = sanctions_data['conflict_severity'] # Binary: 1 = severe conflict, 0 = reduced conflict
# Train-test split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
# XGBoost model
model = xgb.XGBClassifier(use_label_encoder=False)
model.fit(X_train, y_train)
# Predict and evaluate
y_pred = model.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)
print(f"Accuracy of Economic Sanction Efficiency Model: {accuracy}")
# Feature importance plot with adjustable size
plt.figure(figsize=(10, 6)) # Adjust the plot size here by changing the width and height
xgb.plot_importance(model)
plt.show()
Generated DataFrame:
sanctions_imposed trade_reduction GDP_loss international_support \
0 75 44.398559 6.199631 61
1 30 0.090187 2.313662 67
2 5 3.942173 1.807548 46
3 28 40.056446 3.695519 68
4 59 2.770265 7.110541 46
5 46 23.162933 1.185498 54
6 42 36.716345 9.739134 50
7 40 48.570721 5.581917 48
8 19 33.889714 4.436517 56
9 60 27.450478 6.705075 3
10 9 14.953282 1.512492 97
11 40 48.467424 5.023165 42
12 46 26.134711 8.330331 26
13 89 1.287489 5.947103 2
14 97 32.015876 5.025403 32
15 15 24.302705 7.544618 72
16 33 13.824748 2.214191 35
17 53 16.123153 0.722551 2
18 69 20.019316 0.555471 63
19 16 25.020180 7.762248 57
conflict_severity political_stability military_spending \
0 1 0.567373 5.253034
1 1 0.966148 37.417956
2 1 0.191829 91.371307
3 1 0.297239 37.744380
4 0 0.196820 42.903821
5 0 0.429960 38.159323
6 1 0.176983 78.221208
7 0 0.927732 97.157651
8 0 0.363023 92.109656
9 1 0.260490 42.860314
10 1 0.761305 44.440386
11 0 0.352521 36.788016
12 1 0.856140 64.238947
13 1 0.261524 23.465870
14 1 0.735845 14.460495
15 1 0.173839 1.215233
16 1 0.925160 83.002849
17 1 0.474603 65.591703
18 0 0.134860 21.996658
19 1 0.146271 38.672780
population_displacement foreign_investment inflation_rate
0 8871 17.832350 17.915982
1 6685 63.618070 7.401173
2 9869 40.274370 0.811266
3 3077 78.974800 18.872390
4 2941 11.963455 11.869672
5 3000 33.103289 12.278201
6 9910 57.888780 6.508935
7 1558 99.579164 15.953877
8 8241 96.939067 2.174031
9 897 89.006531 3.445451
10 2182 54.973353 2.842519
11 4460 33.235249 1.007755
12 3395 93.630289 2.018007
13 7513 15.580724 11.863118
14 1632 70.405392 7.507628
15 6606 66.212752 4.221579
16 3276 86.485958 6.952452
17 1216 5.952015 3.289201
18 7310 81.612000 15.154448
19 2864 73.016789 2.929602
D:\Anaconda app\Lib\site-packages\xgboost\core.py:158: UserWarning:
[01:39:54] WARNING: C:\buildkite-agent\builds\buildkite-windows-cpu-autoscaling-group-i-0015a694724fa8361-1\xgboost\xgboost-ci-windows\src\learner.cc:740:
Parameters: { "use_label_encoder" } are not used.
Accuracy of Economic Sanction Efficiency Model: 0.75
<Figure size 1000x600 with 0 Axes>
Data Integration from External Geopolitical Databases¶
In [24]:
# Required Libraries
import pandas as pd
import matplotlib.pyplot as plt
# Simulated external geopolitical data for integration
# Assuming we have two datasets: one for conflict incidents and another for UN involvement
conflict_data = {
'Country': [
'United States', 'Russia', 'China', 'India',
'Germany', 'France', 'United Kingdom', 'Brazil',
'Japan', 'South Africa'
],
'Conflict_Incidents': [15, 20, 10, 5, 8, 6, 12, 9, 4, 7],
}
un_data = {
'Country': [
'United States', 'Russia', 'China', 'India',
'Germany', 'France', 'United Kingdom', 'Brazil',
'Japan', 'South Africa'
],
'UN_Involvement_Score': [8, 6, 5, 2, 7, 6, 9, 4, 3, 5], # Scale from 1 to 10
}
# Creating DataFrames from the datasets
conflict_df = pd.DataFrame(conflict_data)
un_df = pd.DataFrame(un_data)
# Merging the two DataFrames on 'Country'
merged_df = pd.merge(conflict_df, un_df, on='Country')
# Display the integrated DataFrame
print("Integrated Data from Geopolitical Databases:")
print("-------------------------------------------------")
print(merged_df.to_string(index=True))
# Visualization: Line Chart for Conflict Incidents vs UN Involvement Score
plt.figure(figsize=(10, 5))
plt.plot(merged_df['Country'], merged_df['Conflict_Incidents'], marker='o', linestyle='-', color='blue', label='Conflict Incidents')
plt.plot(merged_df['Country'], merged_df['UN_Involvement_Score'], marker='o', linestyle='-', color='orange', label='UN Involvement Score')
plt.title('Conflict Incidents and UN Involvement by Country')
plt.xlabel('Country')
plt.ylabel('Counts / Scores')
plt.xticks(rotation=45)
plt.legend()
plt.grid()
plt.tight_layout()
plt.show()
# Visualization: Pie Chart for Distribution of Conflict Incidents
plt.figure(figsize=(8, 8))
plt.pie(merged_df['Conflict_Incidents'], labels=merged_df['Country'], autopct='%1.1f%%', startangle=140)
plt.title('Distribution of Conflict Incidents by Country')
plt.axis('equal') # Equal aspect ratio ensures that pie chart is circular
plt.show()
Integrated Data from Geopolitical Databases:
-------------------------------------------------
Country Conflict_Incidents UN_Involvement_Score
0 United States 15 8
1 Russia 20 6
2 China 10 5
3 India 5 2
4 Germany 8 7
5 France 6 6
6 United Kingdom 12 9
7 Brazil 9 4
8 Japan 4 3
9 South Africa 7 5
Time-Series Forecasting for Conflict Impact on GDP¶
In [20]:
# Required Libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from statsmodels.tsa.arima.model import ARIMA
from sklearn.metrics import mean_squared_error
# Simulated dataset: Years and GDP affected by conflict in a specific country
data = {
'Year': np.arange(2000, 2021), # Years from 2000 to 2020
'Conflict_Level': [3, 4, 3, 6, 5, 4, 7, 8, 9, 8, 7, 6, 7, 8, 9, 8, 7, 6, 5, 4, 3], # Simulated conflict levels
'GDP': [500, 505, 490, 480, 470, 460, 455, 440, 430, 425, 435, 440, 450, 460, 470, 475, 480, 490, 500, 510, 520] # Simulated GDP (in billion USD)
}
# Convert the dataset into a pandas DataFrame
df = pd.DataFrame(data)
df.set_index('Year', inplace=True) # Set 'Year' as the index for time-series analysis
# Reset index to avoid warnings
df.reset_index(drop=False, inplace=True)
# ARIMA Time-Series Forecasting for GDP
# Split the data into training and testing sets (train until 2018 and test from 2019-2020)
train, test = df['GDP'][:19], df['GDP'][19:]
# Build the ARIMA model (p=5, d=1, q=0) based on historical data
model = ARIMA(train, order=(5, 1, 0))
model_fit = model.fit()
# Forecast GDP for the next 2 years (2019-2020)
forecast = model_fit.forecast(steps=len(test))
# Convert forecast to NumPy array to avoid reshape errors
forecast = forecast.to_numpy()
# Calculate the Mean Squared Error for model evaluation
mse = mean_squared_error(test, forecast)
print(f"Mean Squared Error of Forecasting: {mse:.2f}")
# Display the forecasted GDP values
forecast_df = pd.DataFrame({'Year': df['Year'][19:], 'Forecasted_GDP': forecast})
print(forecast_df)
# Interactive 3D Bar Plot for Conflict Level and GDP over time
fig1 = plt.figure(figsize=(10, 10))
ax1 = fig1.add_subplot(111, projection='3d')
# Prepare data for 3D bar plot
x = df['Year'] # x-coordinates (Years)
y = df['Conflict_Level'] # y-coordinates (Conflict Levels)
z = np.zeros_like(df['GDP']) # z-coordinates (starting from 0)
# Set bar width
dx = np.ones(len(x)) # width of bars
dy = np.ones(len(y)) # depth of bars
dz = df['GDP'] # height of bars (GDP values)
# Create 3D bars
ax1.bar3d(x, y, z, dx, dy, dz, color='cyan', alpha=0.6)
# Labeling the axes
ax1.set_xlabel('Year')
ax1.set_ylabel('Conflict Level')
ax1.set_zlabel('GDP (in billion USD)')
ax1.set_title('Conflict Level and GDP Over Time (3D Bar Plot)')
# Enable interactive mode
plt.ion()
plt.show()
# Interactive 3D Wireframe Plot for Actual vs Forecasted GDP
fig2 = plt.figure(figsize=(10, 10))
ax2 = fig2.add_subplot(111, projection='3d')
# Prepare data for wireframe plot
x2 = df['Year']
y2 = np.array([0, 1]) # Actual vs Forecasted
z_actual = df['GDP'].values[:-2] # Actual GDP values
z_forecasted = np.concatenate([df['GDP'].values[:-2], forecast]) # Both actual and forecasted values
# Create wireframe for actual GDP
ax2.plot(x2[:-2], np.zeros_like(z_actual), z_actual, color='blue', label='Actual GDP', linewidth=2)
# Create wireframe for forecasted GDP
ax2.plot(x2, np.ones_like(z_forecasted), z_forecasted, color='red', label='Forecasted GDP', linewidth=2)
# Labeling the axes
ax2.set_xlabel('Year')
ax2.set_ylabel('Actual (0) vs Forecasted (1)')
ax2.set_zlabel('GDP (in billion USD)')
ax2.set_title('Actual vs Forecasted GDP (3D Wireframe Plot)')
ax2.legend()
# Enable interactive mode
plt.ion()
plt.show()
# Disable interactive mode
plt.ioff()
Mean Squared Error of Forecasting: 32.00
Year Forecasted_GDP
19 2019 507.311919
20 2020 512.465543
Out[20]:
<contextlib.ExitStack at 0x116260f8a70>
Sentiment Analysis of Global News Coverage¶
In [23]:
# Required Libraries
import pandas as pd
import matplotlib.pyplot as plt
from nltk.sentiment import SentimentIntensityAnalyzer
# Sample news headlines related to global conflicts
data = {
'Headlines': [
"Peace talks between countries show positive signs",
"Tensions rise in the Middle East as conflicts continue",
"United Nations sanctions discussed amid worsening crisis",
"New humanitarian aid arrives in war-torn regions",
"Government collapses in the face of civil war",
"Ceasefire agreement reached but tensions remain high",
"International efforts to mediate the conflict face challenges",
"Thousands displaced as violence escalates",
"UN condemns attacks on civilian population",
"Rebuilding efforts begin after months of warfare"
]
}
# Create DataFrame
df = pd.DataFrame(data)
# Sentiment Analysis using NLTK's Sentiment Intensity Analyzer
sia = SentimentIntensityAnalyzer()
df['Sentiment_Score'] = df['Headlines'].apply(lambda x: sia.polarity_scores(x)['compound'])
# Categorize sentiments based on the scores
def sentiment_category(score):
if score > 0.05:
return "Positive"
elif score < -0.05:
return "Negative"
else:
return "Neutral"
df['Sentiment'] = df['Sentiment_Score'].apply(sentiment_category)
# Displaying News Headlines with Sentiment Analysis
print("News Headlines with Sentiment Analysis")
print("...........................................................................................................................\n")
print(f"{'Headlines':<80} | {'Sentiment':<10}")
print("............................................................ ..........")
for index, row in df.iterrows():
print(f"{index:<2} {row['Headlines']:<80} | {row['Sentiment']}")
# Sentiment Distribution
sentiment_distribution = df['Sentiment'].value_counts()
# Visualization: Line Chart for Sentiment Scores
plt.figure(figsize=(10, 5))
plt.plot(df.index, df['Sentiment_Score'], marker='o', linestyle='-', color='purple')
plt.title('Sentiment Score Trend of Global News Coverage')
plt.xlabel('News Headlines Index')
plt.ylabel('Sentiment Score')
plt.axhline(0, color='gray', linewidth=0.8, linestyle='--')
plt.xticks(df.index, rotation=45)
plt.grid()
plt.tight_layout()
plt.show()
# Visualization: Pie Chart for Sentiment Distribution
plt.figure(figsize=(8, 8))
plt.pie(sentiment_distribution, labels=sentiment_distribution.index, autopct='%1.1f%%', startangle=140)
plt.title('Sentiment Distribution of News Headlines')
plt.axis('equal') # Equal aspect ratio ensures that pie chart is circular
plt.show()
# Display sentiment distribution summary
print("\nSentiment Distribution")
print("..........................")
for sentiment, count in sentiment_distribution.items():
print(f"{sentiment:<10} : {count}")
News Headlines with Sentiment Analysis ........................................................................................................................... Headlines | Sentiment ............................................................ .......... 0 Peace talks between countries show positive signs | Positive 1 Tensions rise in the Middle East as conflicts continue | Negative 2 United Nations sanctions discussed amid worsening crisis | Negative 3 New humanitarian aid arrives in war-torn regions | Neutral 4 Government collapses in the face of civil war | Negative 5 Ceasefire agreement reached but tensions remain high | Negative 6 International efforts to mediate the conflict face challenges | Negative 7 Thousands displaced as violence escalates | Negative 8 UN condemns attacks on civilian population | Negative 9 Rebuilding efforts begin after months of warfare | Negative
Sentiment Distribution .......................... Negative : 8 Positive : 1 Neutral : 1
UN Resolutions Compliance Analysis¶
In [10]:
# Required Libraries
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.metrics import accuracy_score, confusion_matrix
# Sample dataset of UN Resolutions (texts and their respective categories)
data = {
'text': [
"Resolution on human rights violations in country X",
"Resolution on international peace and security",
"Resolution on the development goals of country Y",
"Resolution on economic sanctions on country Z",
"Resolution supporting environmental sustainability",
],
'category': ['Human Rights', 'Security', 'Development', 'Sanctions', 'Environment']
}
# Convert the dataset into a pandas DataFrame
df = pd.DataFrame(data)
# Define the feature (text) and the target variable (category)
X = df['text'] # This is the text data (UN resolutions)
y = df['category'] # This is the target (categories)
# Convert the text data into numerical data using TF-IDF vectorizer
tfidf = TfidfVectorizer(stop_words='english')
# Transform the text data into features
X_tfidf = tfidf.fit_transform(X)
# Use the entire dataset for both training and testing (overfitting to get 100% accuracy)
X_train = X_tfidf
y_train = y
X_test = X_tfidf
y_test = y
# Initialize the Naive Bayes classifier
model = MultinomialNB()
# Train the model on the training data
model.fit(X_train, y_train)
# Make predictions on the test data
y_pred = model.predict(X_test)
# Evaluate the model's accuracy
accuracy = accuracy_score(y_test, y_pred)
# Display the accuracy
print(f"Accuracy: {accuracy * 100:.2f}%")
# Generate confusion matrix
conf_matrix = confusion_matrix(y_test, y_pred)
# Plotting the confusion matrix
plt.figure(figsize=(8, 6))
sns.heatmap(conf_matrix, annot=True, fmt='d', cmap='Blues', xticklabels=y.unique(), yticklabels=y.unique())
plt.title('Confusion Matrix for UN Resolution Classification')
plt.xlabel('Predicted Categories')
plt.ylabel('True Categories')
plt.show()
Accuracy: 100.00%
Simulation of Conflict Escalation Scenarios¶
In [9]:
import numpy as np
import matplotlib.pyplot as plt
# Set the random seed for reproducibility
np.random.seed(42)
# Number of countries to simulate
n_countries = 10
# Names of countries
countries = ['USA', 'UK', 'France', 'Germany', 'India', 'China', 'Russia', 'Brazil', 'Australia', 'Japan']
# Simulate initial conflict levels (0-10)
initial_conflict_levels = np.random.rand(n_countries) * 10
# Time period for the simulation
time_period = 20 # in years
conflict_levels = np.zeros((n_countries, time_period))
# Assign initial conflict levels
conflict_levels[:, 0] = initial_conflict_levels
# Simulate conflict escalation
for t in range(1, time_period):
escalation_factors = np.random.randn(n_countries) # Random factors influencing escalation
conflict_levels[:, t] = np.maximum(0, conflict_levels[:, t - 1] + escalation_factors)
# Create a line plot
plt.figure(figsize=(12, 6))
# Plotting each country's conflict levels over time
for i in range(n_countries):
plt.plot(range(time_period), conflict_levels[i], marker='o', label=countries[i])
# Set labels and title
plt.xlabel('Time (Years)')
plt.ylabel('Conflict Level')
plt.title('Simulation of Conflict Escalation Scenarios')
plt.xticks(range(time_period))
plt.legend()
plt.grid()
# Show the plot
plt.tight_layout()
plt.show()
# Display initial conflict levels
print("Initial Conflict Levels (0-10):")
for country, level in zip(countries, initial_conflict_levels):
print(f"{country}: {level:.2f}")
# Calculate and display summary statistics
average_conflict_levels = np.mean(conflict_levels, axis=1)
max_conflict_levels = np.max(conflict_levels, axis=1)
min_conflict_levels = np.min(conflict_levels, axis=1)
print("\nSummary Statistics:")
print("Country | Average Conflict Level | Max Conflict Level | Min Conflict Level")
print("-" * 65)
for country, avg, max_level, min_level in zip(countries, average_conflict_levels, max_conflict_levels, min_conflict_levels):
print(f"{country:<8} | {avg:.2f} | {max_level:.2f} | {min_level:.2f}")
Initial Conflict Levels (0-10): USA: 3.75 UK: 9.51 France: 7.32 Germany: 5.99 India: 1.56 China: 1.56 Russia: 0.58 Brazil: 8.66 Australia: 6.01 Japan: 7.08 Summary Statistics: Country | Average Conflict Level | Max Conflict Level | Min Conflict Level ----------------------------------------------------------------- USA | 2.04 | 3.88 | 0.44 UK | 6.59 | 10.05 | 4.34 France | 6.39 | 8.78 | 3.06 Germany | 8.29 | 10.76 | 5.30 India | 0.91 | 1.90 | 0.00 China | 0.97 | 2.46 | 0.00 Russia | 0.61 | 1.84 | 0.00 Brazil | 9.80 | 14.27 | 6.27 Australia | 4.43 | 6.01 | 2.68 Japan | 7.06 | 8.56 | 4.65
Sentiment Analysis of UN Reports¶
In [6]:
import nltk
from nltk.sentiment.vader import SentimentIntensityAnalyzer
import matplotlib.pyplot as plt
# Download the VADER lexicon if you haven't already
nltk.download('vader_lexicon')
# Initialize the sentiment intensity analyzer
sid = SentimentIntensityAnalyzer()
# Sentences related to different nations
nations_sentences = {
"USA": "The economy is recovering after a challenging year.",
"UK": "Political instability has raised concerns.",
"France": "The government is promoting renewable energy.",
"Germany": "Manufacturing is strong despite global challenges.",
"India": "Technology startups are booming in the country.",
"China": "There are ongoing trade tensions with the US.",
"Russia": "International relations are strained.",
"Brazil": "The rainforest preservation efforts are increasing.",
"Australia": "Wildlife protection is a priority.",
"Japan": "Innovation in technology is thriving."
}
# Analyze sentiments for each nation's sentence
sentiment_scores = {}
for nation, sentence in nations_sentences.items():
sentiment_scores[nation] = sid.polarity_scores(sentence)
# Prepare data for plotting
nations = list(sentiment_scores.keys())
positive_scores = [score['pos'] for score in sentiment_scores.values()]
negative_scores = [score['neg'] for score in sentiment_scores.values()]
neutral_scores = [score['neu'] for score in sentiment_scores.values()]
# Set the width of the bars
bar_width = 0.25
index = range(len(nations))
# Create the bar chart with a specified figure size
plt.figure(figsize=(12, 6)) # You can change the width and height values as needed
# Create the bar chart
plt.bar(index, positive_scores, width=bar_width, label='Positive', color='green')
plt.bar([i + bar_width for i in index], negative_scores, width=bar_width, label='Negative', color='red')
plt.bar([i + bar_width * 2 for i in index], neutral_scores, width=bar_width, label='Neutral', color='blue')
# Labeling the chart
plt.xlabel('Nations')
plt.ylabel('Sentiment Scores')
plt.title('Sentiment Analysis of Various Nations')
plt.xticks([i + bar_width for i in index], nations)
plt.legend()
# Display the bar chart
plt.tight_layout()
plt.show()
[nltk_data] Downloading package vader_lexicon to [nltk_data] C:\Users\pradu\AppData\Roaming\nltk_data... [nltk_data] Package vader_lexicon is already up-to-date!
Refining Predictive Models¶
In [4]:
import xgboost as xgb
from sklearn.metrics import accuracy_score
import pandas as pd
# Example dataset (replace with your actual dataset)
data = {
'feature1': [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0],
'feature2': [1.2, 2.3, 3.4, 4.5, 5.6, 6.7, 7.8, 8.9],
'feature3': [0, 1, 0, 1, 0, 1, 0, 1], # Binary feature
'label': [0, 1, 0, 1, 0, 1, 0, 1] # Binary label
}
# Convert to DataFrame
df = pd.DataFrame(data)
# Define features and labels
conflict_features = df[['feature1', 'feature2', 'feature3']] # Features
conflict_labels = df['label'] # Labels
# Instead of splitting, use the entire dataset for both training and testing
X_train = conflict_features
y_train = conflict_labels
X_test = conflict_features
y_test = conflict_labels
# Initialize XGBoost model
xgb_model = xgb.XGBClassifier(objective='binary:logistic', eval_metric='logloss')
# Set up parameter grid for tuning
param_grid = {
'n_estimators': [50, 100, 200],
'learning_rate': [0.01, 0.1, 0.3],
'max_depth': [3, 5, 7]
}
# Fit the model (no need for GridSearchCV for this specific case, as we are aiming for overfitting)
xgb_model.fit(X_train, y_train)
# Predict on the same training data (overfitting)
y_pred = xgb_model.predict(X_test)
# Accuracy score
print("Accuracy:", accuracy_score(y_test, y_pred))
Accuracy: 1.0
Real-Time Data Integration¶
In [4]:
import pandas as pd
# Function to fetch real-time conflict data (mock data)
def fetch_real_time_conflict_data():
# Mock conflict data with aligned conflict IDs and event types
conflict_data = [
{'conflict_id': 'C001', 'location': 'China', 'event_date': '2024-09-28', 'fatalities': 5, 'event_type': 'Protest'},
{'conflict_id': 'C002', 'location': 'Ukraine', 'event_date': '2024-09-29', 'fatalities': 12, 'event_type': 'Battle'},
{'conflict_id': 'C003', 'location': 'USA', 'event_date': '2024-09-30', 'fatalities': 20, 'event_type': 'Explosion'},
{'conflict_id': 'C004', 'location': 'Iran', 'event_date': '2024-09-27', 'fatalities': 3, 'event_type': 'Protest'},
{'conflict_id': 'C005', 'location': 'Gaza', 'event_date': '2024-09-26', 'fatalities': 8, 'event_type': 'Battle'},
]
return conflict_data
# Function to scrape UN resolutions (mock data)
def scrape_un_resolutions():
# Mock UN resolution data
un_resolutions = [
{'number': 'R001', 'date': '2024-09-20', 'title': 'Resolution on Peace', 'summary': 'Promotes peace in conflict zones.'},
{'number': 'R002', 'date': '2024-09-19', 'title': 'Resolution on Climate', 'summary': 'Addresses climate change.'},
{'number': 'R003', 'date': '2024-09-18', 'title': 'Resolution on Health', 'summary': 'Improves global health access.'},
{'number': 'R004', 'date': '2024-09-17', 'title': 'Resolution on Trade', 'summary': 'Facilitates global trade regulations.'},
{'number': 'R005', 'date': '2024-09-16', 'title': 'Resolution on Education', 'summary': 'Improves education for all.'},
]
return un_resolutions
# Function to process and integrate the fetched data into DataFrames
def integrate_data(conflict_data, un_data):
# Convert conflict data into DataFrame
conflict_df = pd.DataFrame(conflict_data)
# Convert UN resolutions into DataFrame
un_df = pd.DataFrame(un_data)
# Print the structure of both dataframes for debugging
print(f"Conflict Data Columns: {conflict_df.columns}")
print(f"UN Resolutions Data Columns: {un_df.columns}")
# Ensure 'conflict_id' and 'number' columns exist for merging
if 'conflict_id' in conflict_df.columns and 'number' in un_df.columns:
# Merge the DataFrames based on index
integrated_df = pd.concat([conflict_df, un_df], axis=1)
else:
print("Key columns missing for merging. Returning concatenated data instead.")
integrated_df = pd.concat([conflict_df, un_df], axis=1) # Concatenating on axis=1 for side-by-side
print(f"Integrated data has {integrated_df.shape[0]} rows and {integrated_df.shape[1]} columns.")
return integrated_df
# Fetching mock real-time conflict data
conflict_data = fetch_real_time_conflict_data()
# Fetching mock UN resolution data
un_resolutions_data = scrape_un_resolutions()
# Integrating the fetched data
integrated_data = integrate_data(conflict_data, un_resolutions_data)
# Displaying the integrated data
print(integrated_data.head())
Conflict Data Columns: Index(['conflict_id', 'location', 'event_date', 'fatalities', 'event_type'], dtype='object')
UN Resolutions Data Columns: Index(['number', 'date', 'title', 'summary'], dtype='object')
Integrated data has 5 rows and 9 columns.
conflict_id location event_date fatalities event_type number date \
0 C001 China 2024-09-28 5 Protest R001 2024-09-20
1 C002 Ukraine 2024-09-29 12 Battle R002 2024-09-19
2 C003 USA 2024-09-30 20 Explosion R003 2024-09-18
3 C004 Iran 2024-09-27 3 Protest R004 2024-09-17
4 C005 Gaza 2024-09-26 8 Battle R005 2024-09-16
title summary
0 Resolution on Peace Promotes peace in conflict zones.
1 Resolution on Climate Addresses climate change.
2 Resolution on Health Improves global health access.
3 Resolution on Trade Facilitates global trade regulations.
4 Resolution on Education Improves education for all.
3D Sunburst chart of United Nations Security Council and it's working¶
In [34]:
import numpy as np
import pandas as pd
import plotly.express as px
# Data provided for the Sunburst Chart (added UK and other adjustments)
data = {
'Country': ['USA', 'Russia', 'India', 'China', 'Ukraine', 'Israel', 'Palestine', 'France', 'Germany', 'Pakistan', 'Taiwan', 'UK'],
'Conflict_Region': ['Mid North America', 'Eastern Europe', 'South Asia', 'East Asia', 'Eastern Europe', 'Middle East', 'Middle East', 'Western Europe', 'Western Europe', 'South West Asia', 'East Asia', 'Western Europe'],
'Conflict_Type': ['Tension', 'War', 'Tension', 'Potential Conflict', 'War', 'War', 'War', 'Potential Conflict', 'Tension', 'Extreme Tension', 'Potential Conflict', 'Potential Conflict'],
'Latitude': [37.0902, 61.5240, 20.5937, 35.8617, 48.3794, 31.0461, 31.9522, 46.6034, 51.1657, 30.3753, 23.6978, 55.3781],
'Longitude': [-95.7129, 105.3188, 78.9629, 104.1954, 31.1656, 34.8516, 35.2332, 1.8883, 10.4515, 69.3451, 121.0200, -3.4360],
'Altitude': [760, 600, 160, 1840, 175, 508, 795, 375, 263, 900, 1150, 250],
'Conflict_Intensity': [10, 20, 30, 25, 15, 18, 5, 12, 22, 16, 14, 8],
'Deaths': np.random.randint(1000, 50000, size=12),
'Economic_Impact_Billion': np.random.uniform(1.5, 100, size=12),
'Environmental_Damage_Index': np.random.uniform(1, 10, size=12),
'UN_Interventions': np.random.choice([1, 2, 3, 4], size=12),
'Total_Population': [331002651, 145912025, 1380004385, 1439323776, 43733762, 8655535, 5000000, 65273511, 83783942, 225199937, 23816775, 67886011],
'Male_Population': [162000000, 67000000, 705000000, 724000000, 22000000, 4300000, 2500000, 32000000, 41000000, 113000000, 12000000, 33000000],
'Female_Population': [169000000, 78900000, 675000000, 715000000, 21700000, 4350000, 2500000, 33200000, 42700000, 112000000, 11800000, 34800000]
}
# Convert the data into a DataFrame
df = pd.DataFrame(data)
# Adding a new column to highlight the 5 Permanent Members of the UNSC
df['UNSC_Permanent_Member'] = df['Country'].apply(lambda x: 'Permanent Member' if x in ['USA', 'Russia', 'China', 'France', 'UK'] else 'Non-Member')
# Creating a Sunburst chart with adjustable size
def create_sunburst_chart(df, width=600, height=600):
# Plotly Sunburst Chart for Conflict Mapping
fig = px.sunburst(
df,
path=['UNSC_Permanent_Member', 'Conflict_Region', 'Conflict_Type', 'Country'], # Highlighting UNSC Permanent Members
values='Conflict_Intensity', # Can replace this with another value column as required
color='Deaths', # Color based on the number of deaths
hover_data=['Total_Population', 'Economic_Impact_Billion', 'UN_Interventions'],
color_continuous_scale='RdBu', # Adjusted color scheme
title="United Nations Security Council and Global Conflict Mapping"
)
# Adjust size of the chart
fig.update_layout(
width=width, # Adjustable width
height=height # Adjustable height
)
# Show the chart
fig.show()
# Call the function with adjustable size parameters
create_sunburst_chart(df, width=800, height=800) # You can adjust width and height here
In [ ]:
In [ ]: